// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */

#include "ice_common.h"
#include "ice_flow.h"
#include "ice_dcf.h"

/* Size of known protocol header fields */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IPV4_ID		2
#define ICE_FLOW_FLD_SZ_IPV6_ID		4
#define ICE_FLOW_FLD_SZ_IP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_TCP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_UDP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM	4
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI	4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID	2
#define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID	2
#define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID	2

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field */
};

#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
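
/* Illustrative example (not part of the driver source): an entry such as
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 * expands to { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 },
 * i.e. a field that starts 96 bits (12 bytes) into the IPv4 header and is
 * 32 bits (4 bytes) wide, since both the offset and the size arguments are
 * converted from bytes to bits with BITS_PER_BYTE.
 */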

/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
	/* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
			  ICE_FLOW_FLD_SZ_IPV4_ID),
	/* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
			  ICE_FLOW_FLD_SZ_IPV6_ID),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
	/* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
	/* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
			  ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* L2TPV3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ESP */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* AH */
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* NAT_T_ESP */
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
	/* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
			  ICE_FLOW_FLD_SZ_VXLAN_VNI),
	/* ECPRI_TP0 */
	/* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
			  ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
	/* UDP_ECPRI_TP0 */
	/* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
			  ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
	/* L2TPV2 */
	/* ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12,
			  ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID),
	/* L2TPV2_LEN */
	/* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14,
			  ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID),
};
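
/* Illustrative note (not part of the driver source): the table above is
 * indexed by the ICE_FLOW_FIELD_IDX_* values named in the per-entry comments,
 * so a lookup such as ice_flds_info[ICE_FLOW_FIELD_IDX_IPV4_DA] is expected
 * to describe a field at byte offset 16 of the IPv4 header (off = 128 bits)
 * with a size of 4 bytes (32 bits).
 */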

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
	0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
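
/* Illustrative note (not part of the driver source): each of these tables is
 * a flat bitmap over hardware packet types (PTYPEs) - 32 u32 words, i.e.
 * 1024 bits, with bit N set when PTYPE N can carry the header in question.
 * They are consumed as ordinary bitmaps, roughly:
 *
 *	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
 *
 *	memset(ptypes, 0xff, sizeof(ptypes));
 *	bitmap_and(ptypes, ptypes, (const unsigned long *)ice_ptypes_mac_ofos,
 *		   ICE_FLOW_PTYPE_MAX);
 *
 * so each header added to a segment ANDs away the PTYPEs that cannot match,
 * which is the pattern ice_flow_proc_seg_hdrs() follows further below.
 */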

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * does NOT include IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
	0x00015000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * includes IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos_all[] = {
	0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
	0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00100000,
	0xC0FC0000, 0x0000000F, 0xBC0BC0BC, 0x00000BC0,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * does NOT include IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x76000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00000000, 0x03F00000, 0x00000540, 0x00000000,
	0x0002A000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * includes IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos_all[] = {
	0x00000000, 0x00000000, 0x76000000, 0xFEFDE000,
	0x0000077E, 0x000002AA, 0x00000000, 0x00000000,
	0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
	0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single
 * non-frag IPv4 header - no L4
 */
static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
	0x10800000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ptypes_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single
 * non-frag IPv6 header - no L4
 */
static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x42000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ptypes_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x908427E0, 0x00100007,
	0x0413F000, 0x00000041, 0x10410410, 0x00004104,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x08200000, 0x00000082, 0x20820820, 0x00008208,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x10400000, 0x00000104, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x20800000, 0x00000208, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x20000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x000001E0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for VXLAN with VNI */
static const u32 ice_ptypes_vxlan_vni[] = {
	0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
	0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPU */
static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
};

static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
};

static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
};

static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_gtpu_no_ip[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000600, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_ecpri_tp0[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000400,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_udp_ecpri_tp0[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00100000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_l2tpv2[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_ppp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0xFFFF0000, 0x000003FF, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_ipv4_frag[] = {
	0x00400000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_ipv6_frag[] = {
	0x00000000, 0x00000000, 0x01000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;
	struct ice_flow_prof *prof;

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;

	u16 mask[ICE_MAX_FV_WORDS];
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};

#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
	 ICE_FLOW_SEG_HDR_VXLAN | ICE_FLOW_SEG_HDR_GRE | \
	 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
	 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP)

#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
static int
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
	u8 i;

	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return -EINVAL;

		/* Multiple L4 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
			return -EINVAL;
	}

	return 0;
}
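
/* Illustrative note (not part of the driver source): the is_power_of_2()
 * checks above reject segments that request more than one header per layer.
 * For example, a segment with hdrs = ICE_FLOW_SEG_HDR_IPV4 |
 * ICE_FLOW_SEG_HDR_IPV6 has two L3 bits set (not a power of two) and fails
 * with -EINVAL, while ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP sets one
 * L3 bit and one L4 bit and passes.
 */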

/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12

/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
	u16 sz;

	/* L2 headers */
	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;

	/* L3 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
		/* An L3 header is required if L4 is specified */
		return 0;

	/* L4 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;

	return sz;
}
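
/* Illustrative note (not part of the driver source): for a segment with
 * hdrs = ICE_FLOW_SEG_HDR_VLAN | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
 * ice_flow_calc_seg_sz() returns 16 + 40 + 20 = 76 bytes (MAC+VLAN, IPv6 and
 * TCP headers without options); it returns 0 when an L4 header is requested
 * without an accompanying L3 header.
 */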

/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 */
static int
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	memset(params->ptypes, 0xff, sizeof(params->ptypes));

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const unsigned long *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
				(const unsigned long *)ice_ptypes_mac_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const unsigned long *)ice_ptypes_macvlan_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_arp_of,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
			src = (const unsigned long *)ice_ptypes_ecpri_tp0;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			src = i ?
				(const unsigned long *)ice_ptypes_ipv4_il :
				(const unsigned long *)ice_ptypes_ipv4_ofos_all;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			src = i ?
				(const unsigned long *)ice_ptypes_ipv6_il :
				(const unsigned long *)ice_ptypes_ipv6_ofos_all;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
			src = (const unsigned long *)ice_ptypes_ipv4_frag;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
			src = (const unsigned long *)ice_ptypes_ipv6_frag;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos_no_l4 :
				(const unsigned long *)ice_ptypes_ipv4_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
				(const unsigned long *)ice_ptypes_ipv4_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos_no_l4 :
				(const unsigned long *)ice_ptypes_ipv6_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
				(const unsigned long *)ice_ptypes_ipv6_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
			src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const unsigned long *)ice_ptypes_pppoe;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else {
			src = (const unsigned long *)ice_ptypes_pppoe;
			bitmap_andnot(params->ptypes, params->ptypes, src,
				      ICE_FLOW_PTYPE_MAX);
		}
		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const unsigned long *)ice_ptypes_udp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_tcp_il,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const unsigned long *)ice_ptypes_sctp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
				(const unsigned long *)ice_ptypes_icmp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			src = (const unsigned long *)ice_ptypes_gre_of;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const unsigned long *)ice_ptypes_gtpc;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const unsigned long *)ice_ptypes_gtpc_tid;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
			src = (const unsigned long *)ice_ptypes_gtpu_no_ip;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const unsigned long *)ice_ptypes_gtpu;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet without Extension Header */
			params->attr = ice_attr_gtpu_session;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
			src = (const unsigned long *)ice_ptypes_l2tpv2;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const unsigned long *)ice_ptypes_l2tpv3;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const unsigned long *)ice_ptypes_esp;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const unsigned long *)ice_ptypes_ah;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const unsigned long *)ice_ptypes_nat_t_esp;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
			src = (const unsigned long *)ice_ptypes_vxlan_vni;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
			src = (const unsigned long *)ice_ptypes_udp_ecpri_tp0;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
			src = (const unsigned long *)ice_ptypes_ppp;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				src =
				(const unsigned long *)ice_ptypes_pfcp_node;
			else
				src =
				(const unsigned long *)ice_ptypes_pfcp_session;

			bitmap_and(params->ptypes, params->ptypes, src,
|
|
ICE_FLOW_PTYPE_MAX);
|
|
} else {
|
|
src = (const unsigned long *)ice_ptypes_pfcp_node;
|
|
bitmap_andnot(params->ptypes, params->ptypes, src,
|
|
ICE_FLOW_PTYPE_MAX);
|
|
|
|
src = (const unsigned long *)ice_ptypes_pfcp_session;
|
|
bitmap_andnot(params->ptypes, params->ptypes, src,
|
|
ICE_FLOW_PTYPE_MAX);
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
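
/* Note on the selection logic above: params->ptypes is narrowed step by step,
 * one bitmap_and() (or bitmap_andnot() for the "not present" cases) per header
 * group in the segment.  As an illustration, a segment that sets
 * ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_UDP keeps only the packet types
 * that appear in both the selected IPv4 table and ice_ptypes_udp_il.  The
 * i/!i checks pick the outer ("ofos") tables for the first segment and the
 * inner-layer ("il") tables for subsequent segments.
 */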

/**
 * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
 *
 * This function allocates an extraction sequence entry for a DWORD-sized
 * chunk of the packet flags.
 */
static int
ice_flow_xtract_pkt_flags(struct ice_hw *hw,
			  struct ice_flow_prof_params *params,
			  enum ice_flex_mdid_pkt_flags flags)
{
	u8 fv_words = (u8)hw->blk[params->blk].es.fvw;
	u8 idx;

	/* Make sure the number of extraction sequence entries required does not
	 * exceed the block's capacity.
	 */
	if (params->es_cnt >= fv_words)
		return -ENOSPC;

	/* some blocks require a reversed field vector layout */
	if (hw->blk[params->blk].es.reverse)
		idx = fv_words - params->es_cnt - 1;
	else
		idx = params->es_cnt;

	params->es[idx].prot_id = ICE_PROT_META_ID;
	params->es[idx].off = (u16)flags;
	params->es_cnt++;

	return 0;
}
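
/* Worked example of the reversed field-vector indexing used above (and in the
 * other extraction helpers): if the block reports fv_words = 48 and
 * params->es_cnt = 0, a block with es.reverse set places this entry at index
 * 47 and walks backwards on later calls, while a non-reversed block starts at
 * index 0 and counts up.  The value 48 is only illustrative; the real width
 * comes from hw->blk[blk].es.fvw.
 */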
|
|
|
|
/**
|
|
* ice_flow_xtract_fld - Create an extraction sequence entry for the given field
|
|
* @hw: pointer to the HW struct
|
|
* @params: information about the flow to be processed
|
|
* @seg: packet segment index of the field to be extracted
|
|
* @fld: ID of field to be extracted
|
|
* @match: bitfield of all fields
|
|
*
|
|
* This function determines the protocol ID, offset, and size of the given
|
|
* field. It then allocates one or more extraction sequence entries for the
|
|
* given field, and fill the entries with protocol ID and offset information.
|
|
*/
|
|
static int
|
|
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
|
|
u8 seg, enum ice_flow_field fld, u64 match)
|
|
{
|
|
enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
|
|
u8 fv_words = (u8)hw->blk[params->blk].es.fvw;
|
|
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
|
|
struct ice_flow_fld_info *flds;
|
|
u16 cnt, ese_bits, i;
|
|
u16 sib_mask = 0;
|
|
u16 mask;
|
|
u16 off;
|
|
|
|
flds = params->prof->segs[seg].fields;
|
|
|
|
switch (fld) {
|
|
case ICE_FLOW_FIELD_IDX_ETH_DA:
|
|
case ICE_FLOW_FIELD_IDX_ETH_SA:
|
|
case ICE_FLOW_FIELD_IDX_S_VLAN:
|
|
case ICE_FLOW_FIELD_IDX_C_VLAN:
|
|
prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_ETH_TYPE:
|
|
prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
|
|
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
|
|
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_IPV4_TTL:
|
|
case ICE_FLOW_FIELD_IDX_IPV4_PROT:
|
|
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
|
|
if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
|
|
params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
|
|
seg == 1)
|
|
prot_id = ICE_PROT_IPV4_IL_IL;
|
|
/* TTL and PROT share the same extraction seq. entry.
|
|
* Each is considered a sibling to the other in terms of sharing
|
|
* the same extraction sequence entry.
|
|
*/
|
|
if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
|
|
sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
|
|
else
|
|
sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
|
|
|
|
/* If the sibling field is also included, that field's
|
|
* mask needs to be included.
|
|
*/
|
|
if (match & BIT(sib))
|
|
sib_mask = ice_flds_info[sib].mask;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_IPV6_TTL:
|
|
case ICE_FLOW_FIELD_IDX_IPV6_PROT:
|
|
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
|
|
if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
|
|
params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
|
|
seg == 1)
|
|
prot_id = ICE_PROT_IPV6_IL_IL;
|
|
/* TTL and PROT share the same extraction seq. entry.
|
|
* Each is considered a sibling to the other in terms of sharing
|
|
* the same extraction sequence entry.
|
|
*/
|
|
if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
|
|
sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
|
|
else
|
|
sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
|
|
|
|
/* If the sibling field is also included, that field's
|
|
* mask needs to be included.
|
|
*/
|
|
if (match & BIT(sib))
|
|
sib_mask = ice_flds_info[sib].mask;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_IPV4_SA:
|
|
case ICE_FLOW_FIELD_IDX_IPV4_DA:
|
|
case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
|
|
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
|
|
if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
|
|
params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
|
|
seg == 1)
|
|
prot_id = ICE_PROT_IPV4_IL_IL;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_IPV4_ID:
|
|
prot_id = ICE_PROT_IPV4_OF_OR_S;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_IPV6_SA:
|
|
case ICE_FLOW_FIELD_IDX_IPV6_DA:
|
|
case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
|
|
case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
|
|
case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
|
|
case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
|
|
case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
|
|
case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
|
|
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
|
|
if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
|
|
params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
|
|
seg == 1)
|
|
prot_id = ICE_PROT_IPV6_IL_IL;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_IPV6_ID:
|
|
prot_id = ICE_PROT_IPV6_FRAG;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
|
|
case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
|
|
case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
|
|
case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
|
|
prot_id = ICE_PROT_TCP_IL;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
|
|
case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
|
|
case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
|
|
prot_id = ICE_PROT_UDP_IL_OR_S;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
|
|
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
|
|
case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
|
|
prot_id = ICE_PROT_SCTP_IL;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
|
|
case ICE_FLOW_FIELD_IDX_GTPC_TEID:
|
|
case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
|
|
case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
|
|
case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
|
|
case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
|
|
case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
|
|
case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI:
|
|
case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI:
|
|
case ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID:
|
|
case ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID:
|
|
/* GTP is accessed through UDP OF protocol */
|
|
prot_id = ICE_PROT_UDP_OF;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
|
|
prot_id = ICE_PROT_PPPOE;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_PFCP_SEID:
|
|
prot_id = ICE_PROT_UDP_IL_OR_S;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
|
|
prot_id = ICE_PROT_L2TPV3;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_ESP_SPI:
|
|
prot_id = ICE_PROT_ESP_F;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_AH_SPI:
|
|
prot_id = ICE_PROT_ESP_2;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
|
|
prot_id = ICE_PROT_UDP_IL_OR_S;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
|
|
prot_id = ICE_PROT_ECPRI;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
|
|
prot_id = ICE_PROT_UDP_IL_OR_S;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_ARP_SIP:
|
|
case ICE_FLOW_FIELD_IDX_ARP_DIP:
|
|
case ICE_FLOW_FIELD_IDX_ARP_SHA:
|
|
case ICE_FLOW_FIELD_IDX_ARP_DHA:
|
|
case ICE_FLOW_FIELD_IDX_ARP_OP:
|
|
prot_id = ICE_PROT_ARP_OF;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
|
|
case ICE_FLOW_FIELD_IDX_ICMP_CODE:
|
|
/* ICMP type and code share the same extraction seq. entry */
|
|
prot_id = (params->prof->segs[seg].hdrs &
|
|
ICE_FLOW_SEG_HDR_IPV4) ?
|
|
ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
|
|
sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
|
|
ICE_FLOW_FIELD_IDX_ICMP_CODE :
|
|
ICE_FLOW_FIELD_IDX_ICMP_TYPE;
|
|
break;
|
|
case ICE_FLOW_FIELD_IDX_GRE_KEYID:
|
|
prot_id = ICE_PROT_GRE_OF;
|
|
break;
|
|
default:
|
|
return -EOPNOTSUPP;
|
|
}
|
|
|
|
/* Each extraction sequence entry is a word in size, and extracts a
|
|
* word-aligned offset from a protocol header.
|
|
*/
|
|
ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
|
|
|
|
flds[fld].xtrct.prot_id = (u8)prot_id;
|
|
flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
|
|
ICE_FLOW_FV_EXTRACT_SZ;
|
|
flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
|
|
flds[fld].xtrct.idx = params->es_cnt;
|
|
flds[fld].xtrct.mask = ice_flds_info[fld].mask;
|
|
|
|
/* Adjust the next field-entry index after accommodating the number of
|
|
* entries this field consumes
|
|
*/
|
|
cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
|
|
ese_bits);
|
|
|
|
/* Fill in the extraction sequence entries needed for this field */
|
|
off = flds[fld].xtrct.off;
|
|
mask = flds[fld].xtrct.mask;
|
|
for (i = 0; i < cnt; i++) {
|
|
/* Only consume an extraction sequence entry if there is no
|
|
* sibling field associated with this field or the sibling entry
|
|
* already extracts the word shared with this field.
|
|
*/
|
|
if (sib == ICE_FLOW_FIELD_IDX_MAX ||
|
|
flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
|
|
flds[sib].xtrct.off != off) {
|
|
u8 idx;
|
|
|
|
/* Make sure the number of extraction sequence required
|
|
* does not exceed the block's capability
|
|
*/
|
|
if (params->es_cnt >= fv_words)
|
|
return -ENOSPC;
|
|
|
|
/* some blocks require a reversed field vector layout */
|
|
if (hw->blk[params->blk].es.reverse)
|
|
idx = fv_words - params->es_cnt - 1;
|
|
else
|
|
idx = params->es_cnt;
|
|
|
|
params->es[idx].prot_id = (u8)prot_id;
|
|
params->es[idx].off = off;
|
|
params->mask[idx] = mask | sib_mask;
|
|
params->es_cnt++;
|
|
}
|
|
|
|
off += ICE_FLOW_FV_EXTRACT_SZ;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_xtract_raws - Create extract sequence entries for raw bytes
|
|
* @hw: pointer to the HW struct
|
|
* @params: information about the flow to be processed
|
|
* @seg: index of packet segment whose raw fields are to be extracted
|
|
*/
|
|
static int
|
|
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
|
|
u8 seg)
|
|
{
|
|
u16 fv_words;
|
|
u16 hdrs_sz;
|
|
u8 i;
|
|
|
|
if (!params->prof->segs[seg].raws_cnt)
|
|
return 0;
|
|
|
|
if (params->prof->segs[seg].raws_cnt >
|
|
ARRAY_SIZE(params->prof->segs[seg].raws))
|
|
return -ENOSPC;
|
|
|
|
/* Offsets within the segment headers are not supported */
|
|
hdrs_sz = ice_flow_calc_seg_sz(params, seg);
|
|
if (!hdrs_sz)
|
|
return -EINVAL;
|
|
|
|
fv_words = hw->blk[params->blk].es.fvw;
|
|
|
|
for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
|
|
struct ice_flow_seg_fld_raw *raw;
|
|
u16 off, cnt, j;
|
|
|
|
raw = ¶ms->prof->segs[seg].raws[i];
|
|
|
|
/* Storing extraction information */
|
|
raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
|
|
raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
|
|
ICE_FLOW_FV_EXTRACT_SZ;
|
|
raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
|
|
BITS_PER_BYTE;
|
|
raw->info.xtrct.idx = params->es_cnt;
|
|
|
|
/* Determine the number of field vector entries this raw field
|
|
* consumes.
|
|
*/
|
|
cnt = DIV_ROUND_UP(raw->info.xtrct.disp + (raw->info.src.last * BITS_PER_BYTE),
|
|
(ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
|
|
off = raw->info.xtrct.off;
|
|
for (j = 0; j < cnt; j++) {
|
|
u16 idx;
|
|
|
|
/* Make sure the number of extraction sequence required
|
|
* does not exceed the block's capability
|
|
*/
|
|
if (params->es_cnt >= hw->blk[params->blk].es.count ||
|
|
params->es_cnt >= ICE_MAX_FV_WORDS)
|
|
return -ENOSPC;
|
|
|
|
/* some blocks require a reversed field vector layout */
|
|
if (hw->blk[params->blk].es.reverse)
|
|
idx = fv_words - params->es_cnt - 1;
|
|
else
|
|
idx = params->es_cnt;
|
|
|
|
params->es[idx].prot_id = raw->info.xtrct.prot_id;
|
|
params->es[idx].off = off;
|
|
params->es_cnt++;
|
|
off += ICE_FLOW_FV_EXTRACT_SZ;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}

/**
 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 *
 * This function iterates through all matched fields in the given segments, and
 * creates an extraction sequence for the fields.
 */
static int
ice_flow_create_xtrct_seq(struct ice_hw *hw,
			  struct ice_flow_prof_params *params)
{
	int status = 0;
	u8 i;

	/* For ACL, we also need to extract the direction bit (Rx,Tx) data from
	 * packet flags
	 */
	if (params->blk == ICE_BLK_ACL) {
		status = ice_flow_xtract_pkt_flags(hw, params,
						   ICE_RX_MDID_PKT_FLAGS_15_0);
		if (status)
			return status;
	}

	for (i = 0; i < params->prof->segs_cnt; i++) {
		u64 match = params->prof->segs[i].match;
		enum ice_flow_field j;

		for_each_set_bit(j, (unsigned long *)&match,
				 ICE_FLOW_FIELD_IDX_MAX) {
			status = ice_flow_xtract_fld(hw, params, i, j, match);
			if (status)
				return status;
			clear_bit(j, (unsigned long *)&match);
		}

		/* Process raw matching bytes */
		status = ice_flow_xtract_raws(hw, params, i);
		if (status)
			return status;
	}

	return status;
}
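
/* The per-segment 'match' value iterated above is a u64 bitmap indexed by
 * enum ice_flow_field.  The still-set bits are passed down so that
 * ice_flow_xtract_fld() can tell whether a sibling field (for example the
 * IPv4 TTL/protocol pair) is also being matched before the current field's
 * bit is cleared here.
 */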
|
|
|
|
/**
|
|
* ice_flow_sel_acl_scen - returns the specific scenario
|
|
* @hw: pointer to the hardware structure
|
|
* @params: information about the flow to be processed
|
|
*
|
|
* This function will return the specific scenario based on the
|
|
* params passed to it
|
|
*/
|
|
static int
|
|
ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
|
|
{
|
|
/* Find the best-fit scenario for the provided match width */
|
|
struct ice_acl_scen *cand_scen = NULL, *scen;
|
|
|
|
if (!hw->acl_tbl)
|
|
return -ENOENT;
|
|
|
|
/* Loop through each scenario and match against the scenario width
|
|
* to select the specific scenario
|
|
*/
|
|
list_for_each_entry(scen, &hw->acl_tbl->scens, list_entry)
|
|
if (scen->eff_width >= params->entry_length &&
|
|
(!cand_scen || cand_scen->eff_width > scen->eff_width))
|
|
cand_scen = scen;
|
|
if (!cand_scen)
|
|
return -ENOENT;
|
|
|
|
params->prof->cfg.scen = cand_scen;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
|
|
* @params: information about the flow to be processed
|
|
*/
|
|
static int
|
|
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
|
|
{
|
|
u16 index, i, range_idx = 0;
|
|
|
|
index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
|
|
|
|
for (i = 0; i < params->prof->segs_cnt; i++) {
|
|
struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
|
|
u16 j;
|
|
|
|
for_each_set_bit(j, (unsigned long *)&seg->match,
|
|
(u16)ICE_FLOW_FIELD_IDX_MAX) {
|
|
struct ice_flow_fld_info *fld = &seg->fields[j];
|
|
|
|
fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
|
|
|
|
if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
|
|
fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
|
|
|
|
/* Range checking only supported for single
|
|
* words
|
|
*/
|
|
if (DIV_ROUND_UP(ice_flds_info[j].size + fld->xtrct.disp, BITS_PER_BYTE * 2) > 1)
|
|
return -EINVAL;
|
|
|
|
/* Ranges must define low and high values */
|
|
if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
|
|
fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
|
|
return -EINVAL;
|
|
|
|
fld->entry.val = range_idx++;
|
|
} else {
|
|
/* Store adjusted byte-length of field for later
|
|
* use, taking into account potential
|
|
* non-byte-aligned displacement
|
|
*/
|
|
fld->entry.last = DIV_ROUND_UP(ice_flds_info[j].size + (fld->xtrct.disp % BITS_PER_BYTE),
|
|
BITS_PER_BYTE);
|
|
fld->entry.val = index;
|
|
index += fld->entry.last;
|
|
}
|
|
}
|
|
|
|
for (j = 0; j < seg->raws_cnt; j++) {
|
|
struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
|
|
|
|
raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
|
|
raw->info.entry.val = index;
|
|
raw->info.entry.last = raw->info.src.last;
|
|
index += raw->info.entry.last;
|
|
}
|
|
}
|
|
|
|
/* Currently only support using the byte selection base, which only
|
|
* allows for an effective entry size of 30 bytes. Reject anything
|
|
* larger.
|
|
*/
|
|
if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
|
|
return -EINVAL;
|
|
|
|
/* Only 8 range checkers per profile, reject anything trying to use
|
|
* more
|
|
*/
|
|
if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
|
|
return -EINVAL;
|
|
|
|
/* Store # bytes required for entry for later use */
|
|
params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
 * ice_flow_proc_segs - process all packet segments associated with a profile
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 */
static int
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
	int status;

	status = ice_flow_proc_seg_hdrs(params);
	if (status)
		return status;

	status = ice_flow_create_xtrct_seq(hw, params);
	if (status)
		return status;

	switch (params->blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		status = 0;
		break;
	case ICE_BLK_ACL:
		status = ice_flow_acl_def_entry_frmt(params);
		if (status)
			return status;
		status = ice_flow_sel_acl_scen(hw, params);
		if (status)
			return status;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return status;
}
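
/* Per-block behaviour of ice_flow_proc_segs(): the FD and RSS blocks only
 * need the extraction sequence built above, while the ACL block additionally
 * lays out the flow-entry format (ice_flow_acl_def_entry_frmt()) and then
 * selects a scenario wide enough to hold it (ice_flow_sel_acl_scen()).  Any
 * other block is rejected with -EOPNOTSUPP.
 */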
|
|
|
|
#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
|
|
#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
|
|
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
|
|
|
|
/**
|
|
* ice_flow_find_prof_conds - Find a profile matching headers and conditions
|
|
* @hw: pointer to the HW struct
|
|
* @blk: classification stage
|
|
* @dir: flow direction
|
|
* @segs: array of one or more packet segments that describe the flow
|
|
* @segs_cnt: number of packet segments provided
|
|
* @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
|
|
* @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
|
|
*/
|
|
static struct ice_flow_prof *
|
|
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
|
|
enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
|
|
u8 segs_cnt, u16 vsi_handle, u32 conds)
|
|
{
|
|
struct ice_flow_prof *p, *prof = NULL;
|
|
|
|
mutex_lock(&hw->fl_profs_locks[blk]);
|
|
list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
|
|
if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
|
|
segs_cnt && segs_cnt == p->segs_cnt) {
|
|
u8 i;
|
|
|
|
/* Check for profile-VSI association if specified */
|
|
if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
|
|
ice_is_vsi_valid(hw, vsi_handle) &&
|
|
!test_bit(vsi_handle, p->vsis))
|
|
continue;
|
|
|
|
/* Protocol headers must be checked. Matched fields are
|
|
* checked if specified.
|
|
*/
|
|
for (i = 0; i < segs_cnt; i++)
|
|
if (segs[i].hdrs != p->segs[i].hdrs ||
|
|
((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
|
|
segs[i].match != p->segs[i].match))
|
|
break;
|
|
|
|
/* A match is found if all segments are matched */
|
|
if (i == segs_cnt) {
|
|
prof = p;
|
|
break;
|
|
}
|
|
}
|
|
mutex_unlock(&hw->fl_profs_locks[blk]);
|
|
|
|
return prof;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_find_prof - Look up a profile matching headers and matched fields
|
|
* @hw: pointer to the HW struct
|
|
* @blk: classification stage
|
|
* @dir: flow direction
|
|
* @segs: array of one or more packet segments that describe the flow
|
|
* @segs_cnt: number of packet segments provided
|
|
*/
|
|
u64
|
|
ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
|
|
struct ice_flow_seg_info *segs, u8 segs_cnt)
|
|
{
|
|
struct ice_flow_prof *p;
|
|
|
|
p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
|
|
ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
|
|
|
|
return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_find_prof_id - Look up a profile with given profile ID
|
|
* @hw: pointer to the HW struct
|
|
* @blk: classification stage
|
|
* @prof_id: unique ID to identify this flow profile
|
|
*/
|
|
static struct ice_flow_prof *
|
|
ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
|
|
{
|
|
struct ice_flow_prof *p;
|
|
|
|
list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
|
|
if (p->id == prof_id)
|
|
return p;
|
|
|
|
return NULL;
|
|
}
|
|
|
|
/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	if (entry->entry)
		devm_kfree(ice_hw_to_dev(hw), entry->entry);

	if (entry->range_buf) {
		devm_kfree(ice_hw_to_dev(hw), entry->range_buf);
		entry->range_buf = NULL;
	}

	if (entry->acts) {
		devm_kfree(ice_hw_to_dev(hw), entry->acts);
		entry->acts = NULL;
		entry->acts_cnt = 0;
	}

	devm_kfree(ice_hw_to_dev(hw), entry);
}
|
|
|
|
/**
 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: the profile ID handle
 * @hw_prof_id: pointer to variable to receive the HW profile ID
 */
int
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		     u8 *hw_prof_id)
{
	struct ice_prof_map *map;
	int status = -ENOENT;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	map = ice_search_prof_id(hw, blk, prof_id);
	if (map) {
		*hw_prof_id = map->prof_id;
		status = 0;
	}
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}
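
/* Minimal usage sketch (illustrative only; the consumer shown is hypothetical
 * and error handling is elided):
 *
 *	u8 hw_prof_id;
 *
 *	if (!ice_flow_get_hw_prof(hw, ICE_BLK_RSS, prof_id, &hw_prof_id))
 *		program_something_with(hw_prof_id);
 */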
|
|
|
|
#define ICE_ACL_INVALID_SCEN 0x3f
|
|
|
|
/**
|
|
* ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
|
|
* @hw: pointer to the hardware structure
|
|
* @prof: pointer to flow profile
|
|
* @buf: destination buffer function writes partial extraction sequence to
|
|
*
|
|
* returns 0 if no PF is associated to the given profile
|
|
* returns -EBUSY if at least one PF is associated to the given profile
|
|
* returns other error code for real error
|
|
*/
|
|
static int
|
|
ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
|
|
struct ice_aqc_acl_prof_generic_frmt *buf)
|
|
{
|
|
u8 prof_id = 0;
|
|
int status;
|
|
|
|
status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
|
|
if (status)
|
|
return status;
|
|
|
|
status = ice_query_acl_prof(hw, prof_id, buf, NULL);
|
|
if (status)
|
|
return status;
|
|
|
|
/* If all PF's associated scenarios are all 0 or all
|
|
* ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
|
|
* not been configured yet.
|
|
*/
|
|
if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
|
|
buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
|
|
buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
|
|
buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
|
|
return 0;
|
|
|
|
if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
|
|
buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
|
|
buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
|
|
buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
|
|
buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
|
|
buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
|
|
buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
|
|
buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
|
|
return 0;
|
|
|
|
return -EBUSY;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_free_act_cntr - Free the ACL rule's actions
|
|
* @hw: pointer to the hardware structure
|
|
* @acts: array of actions to be performed on a match
|
|
* @acts_cnt: number of actions
|
|
*/
|
|
static int
|
|
ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
|
|
u8 acts_cnt)
|
|
{
|
|
int i;
|
|
|
|
for (i = 0; i < acts_cnt; i++) {
|
|
if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
|
|
acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
|
|
acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
|
|
struct ice_acl_cntrs cntrs = { 0 };
|
|
int status;
|
|
|
|
/* amount is unused in the dealloc path but the common
|
|
* parameter check routine wants a value set, as zero
|
|
* is invalid for the check. Just set it.
|
|
*/
|
|
cntrs.amount = 1;
|
|
cntrs.bank = 0; /* Only bank0 for the moment */
|
|
cntrs.first_cntr =
|
|
le16_to_cpu(acts[i].data.acl_act.value);
|
|
cntrs.last_cntr =
|
|
le16_to_cpu(acts[i].data.acl_act.value);
|
|
|
|
if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
|
|
cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
|
|
else
|
|
cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
|
|
|
|
status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
|
|
if (status)
|
|
return status;
|
|
}
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
|
|
* @hw: pointer to the hardware structure
|
|
* @prof: pointer to flow profile
|
|
*
|
|
* Disassociate the scenario from the profile for the PF of the VSI.
|
|
*/
|
|
static int
|
|
ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
|
|
{
|
|
struct ice_aqc_acl_prof_generic_frmt buf;
|
|
int status = 0;
|
|
u8 prof_id = 0;
|
|
|
|
memset(&buf, 0, sizeof(buf));
|
|
|
|
status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
|
|
if (status)
|
|
return status;
|
|
|
|
status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
|
|
if (status)
|
|
return status;
|
|
|
|
/* Clear scenario for this PF */
|
|
buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
|
|
status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_rem_entry_sync - Remove a flow entry
|
|
* @hw: pointer to the HW struct
|
|
* @blk: classification stage
|
|
* @entry: flow entry to be removed
|
|
*/
|
|
static int
|
|
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
|
|
struct ice_flow_entry *entry)
|
|
{
|
|
if (!entry)
|
|
return -EINVAL;
|
|
|
|
if (blk == ICE_BLK_ACL) {
|
|
int status;
|
|
|
|
if (ice_dcf_is_acl_capable(hw))
|
|
return -EBUSY;
|
|
if (!entry->prof)
|
|
return -EINVAL;
|
|
|
|
status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
|
|
entry->scen_entry_idx);
|
|
if (status)
|
|
return status;
|
|
|
|
/* Checks if we need to release an ACL counter. */
|
|
if (entry->acts_cnt && entry->acts)
|
|
ice_flow_acl_free_act_cntr(hw, entry->acts,
|
|
entry->acts_cnt);
|
|
}
|
|
|
|
list_del(&entry->l_entry);
|
|
|
|
ice_dealloc_flow_entry(hw, entry);
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
|
|
* @hw: pointer to the HW struct
|
|
* @blk: classification stage
|
|
* @dir: flow direction
|
|
* @prof_id: unique ID to identify this flow profile
|
|
* @segs: array of one or more packet segments that describe the flow
|
|
* @segs_cnt: number of packet segments provided
|
|
* @acts: array of default actions
|
|
* @acts_cnt: number of default actions
|
|
* @prof: stores the returned flow profile added
|
|
*
|
|
* Assumption: the caller has acquired the lock to the profile list
|
|
*/
|
|
static int
|
|
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
|
|
enum ice_flow_dir dir, u64 prof_id,
|
|
struct ice_flow_seg_info *segs, u8 segs_cnt,
|
|
struct ice_flow_action *acts, u8 acts_cnt,
|
|
struct ice_flow_prof **prof)
|
|
{
|
|
struct ice_flow_prof_params *params;
|
|
int status;
|
|
u8 i;
|
|
|
|
if (!prof || (acts_cnt && !acts))
|
|
return -EINVAL;
|
|
|
|
params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params), GFP_KERNEL);
|
|
if (!params)
|
|
return -ENOMEM;
|
|
|
|
params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
|
|
GFP_KERNEL);
|
|
if (!params->prof) {
|
|
status = -ENOMEM;
|
|
goto free_params;
|
|
}
|
|
|
|
/* initialize extraction sequence to all invalid (0xff) */
|
|
for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
|
|
params->es[i].prot_id = ICE_PROT_INVALID;
|
|
params->es[i].off = ICE_FV_OFFSET_INVAL;
|
|
}
|
|
|
|
params->blk = blk;
|
|
params->prof->id = prof_id;
|
|
params->prof->dir = dir;
|
|
params->prof->segs_cnt = segs_cnt;
|
|
|
|
/* Make a copy of the segments that need to be persistent in the flow
|
|
* profile instance
|
|
*/
|
|
for (i = 0; i < segs_cnt; i++)
|
|
memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs));
|
|
|
|
/* Make a copy of the actions that need to be persistent in the flow
|
|
* profile instance.
|
|
*/
|
|
if (acts_cnt) {
|
|
params->prof->acts = devm_kmemdup(ice_hw_to_dev(hw), acts,
|
|
acts_cnt * sizeof(*acts),
|
|
GFP_KERNEL);
|
|
|
|
if (!params->prof->acts) {
|
|
status = -ENOMEM;
|
|
goto out;
|
|
}
|
|
}
|
|
|
|
status = ice_flow_proc_segs(hw, params);
|
|
if (status) {
|
|
ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
|
|
goto out;
|
|
}
|
|
|
|
/* Add a HW profile for this flow profile */
|
|
status = ice_add_prof(hw, blk, prof_id, params->ptypes,
|
|
params->attr, params->attr_cnt, params->es,
|
|
params->mask, true);
|
|
if (status) {
|
|
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
|
|
goto out;
|
|
}
|
|
|
|
INIT_LIST_HEAD(¶ms->prof->entries);
|
|
mutex_init(¶ms->prof->entries_lock);
|
|
*prof = params->prof;
|
|
|
|
out:
|
|
if (status) {
|
|
if (params->prof->acts)
|
|
devm_kfree(ice_hw_to_dev(hw), params->prof->acts);
|
|
devm_kfree(ice_hw_to_dev(hw), params->prof);
|
|
}
|
|
free_params:
|
|
devm_kfree(ice_hw_to_dev(hw), params);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_rem_prof_sync - remove a flow profile
|
|
* @hw: pointer to the hardware structure
|
|
* @blk: classification stage
|
|
* @prof: pointer to flow profile to remove
|
|
*
|
|
* Assumption: the caller has acquired the lock to the profile list
|
|
*/
|
|
static int
|
|
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
|
|
struct ice_flow_prof *prof)
|
|
{
|
|
int status;
|
|
|
|
/* Remove all remaining flow entries before removing the flow profile */
|
|
if (!list_empty(&prof->entries)) {
|
|
struct ice_flow_entry *e, *t;
|
|
|
|
mutex_lock(&prof->entries_lock);
|
|
|
|
list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
|
|
status = ice_flow_rem_entry_sync(hw, blk, e);
|
|
if (status)
|
|
break;
|
|
}
|
|
|
|
mutex_unlock(&prof->entries_lock);
|
|
}
|
|
|
|
if (blk == ICE_BLK_ACL) {
|
|
struct ice_aqc_acl_profile_ranges query_rng_buf;
|
|
struct ice_aqc_acl_prof_generic_frmt buf;
|
|
u8 prof_id = 0;
|
|
|
|
/* Disassociate the scenario from the profile for the PF */
|
|
status = ice_flow_acl_disassoc_scen(hw, prof);
|
|
if (status)
|
|
return status;
|
|
|
|
/* Clear the range-checker if the profile ID is no longer
|
|
* used by any PF
|
|
*/
|
|
status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
|
|
if (status && status != -EBUSY) {
|
|
return status;
|
|
} else if (!status) {
|
|
/* Clear the range-checker value for profile ID */
|
|
memset(&query_rng_buf, 0,
|
|
sizeof(struct ice_aqc_acl_profile_ranges));
|
|
|
|
status = ice_flow_get_hw_prof(hw, blk, prof->id,
|
|
&prof_id);
|
|
if (status)
|
|
return status;
|
|
|
|
status = ice_prog_acl_prof_ranges(hw, prof_id,
|
|
&query_rng_buf, NULL);
|
|
if (status)
|
|
return status;
|
|
}
|
|
}
|
|
|
|
/* Remove all hardware profiles associated with this flow profile */
|
|
status = ice_rem_prof(hw, blk, prof->id);
|
|
if (!status) {
|
|
list_del(&prof->l_entry);
|
|
mutex_destroy(&prof->entries_lock);
|
|
if (prof->acts)
|
|
devm_kfree(ice_hw_to_dev(hw), prof->acts);
|
|
devm_kfree(ice_hw_to_dev(hw), prof);
|
|
}
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
 * @buf: Destination buffer function writes partial xtrct sequence to
 * @info: Info about field
 */
static void
ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
			       struct ice_flow_fld_info *info)
{
	u16 dst, i;
	u8 src;

	src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
		info->xtrct.disp / BITS_PER_BYTE;
	dst = info->entry.val;
	for (i = 0; i < info->entry.last; i++)
		/* HW stores field vector words in LE, convert words back to BE
		 * so constructed entries will end up in network order
		 */
		buf->byte_selection[dst++] = src++ ^ 1;
}
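
/* Example of the "src++ ^ 1" swizzle above: if the field starts at
 * field-vector byte 4, the bytes are emitted in the order 5, 4, 7, 6, ...
 * XOR-ing the running byte index with 1 swaps the two bytes of every 16-bit
 * field-vector word, which converts the little-endian HW layout into the
 * network (big-endian) order expected in the constructed ACL entry.
 */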
|
|
|
|
/**
|
|
* ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
|
|
* @hw: pointer to the hardware structure
|
|
* @prof: pointer to flow profile
|
|
*/
|
|
static int
|
|
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
|
|
{
|
|
struct ice_aqc_acl_prof_generic_frmt buf;
|
|
struct ice_flow_fld_info *info;
|
|
u8 prof_id = 0;
|
|
int status;
|
|
u16 i;
|
|
|
|
memset(&buf, 0, sizeof(buf));
|
|
|
|
status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
|
|
if (status)
|
|
return status;
|
|
|
|
status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
|
|
if (status && status != -EBUSY)
|
|
return status;
|
|
|
|
if (!status) {
|
|
/* Program the profile dependent configuration. This is done
|
|
* only once regardless of the number of PFs using that profile
|
|
*/
|
|
memset(&buf, 0, sizeof(buf));
|
|
|
|
for (i = 0; i < prof->segs_cnt; i++) {
|
|
struct ice_flow_seg_info *seg = &prof->segs[i];
|
|
u16 j;
|
|
|
|
for_each_set_bit(j, (unsigned long *)&seg->match,
|
|
ICE_FLOW_FIELD_IDX_MAX) {
|
|
info = &seg->fields[j];
|
|
|
|
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
|
|
buf.word_selection[info->entry.val] =
|
|
info->xtrct.idx;
|
|
else
|
|
ice_flow_acl_set_xtrct_seq_fld(&buf,
|
|
info);
|
|
}
|
|
|
|
for (j = 0; j < seg->raws_cnt; j++) {
|
|
info = &seg->raws[j].info;
|
|
ice_flow_acl_set_xtrct_seq_fld(&buf, info);
|
|
}
|
|
}
|
|
|
|
memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
|
|
ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS);
|
|
}
|
|
|
|
/* Update the current PF */
|
|
buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
|
|
status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
|
|
* @hw: pointer to the hardware structure
|
|
* @blk: classification stage
|
|
* @vsi_handle: software VSI handle
|
|
* @vsig: target VSI group
|
|
*
|
|
* Assumption: the caller has already verified that the VSI to
|
|
* be added has the same characteristics as the VSIG and will
|
|
* thereby have access to all resources added to that VSIG.
|
|
*/
|
|
int
|
|
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
|
|
u16 vsig)
|
|
{
|
|
int status;
|
|
|
|
if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&hw->fl_profs_locks[blk]);
|
|
status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
|
|
vsig);
|
|
mutex_unlock(&hw->fl_profs_locks[blk]);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_assoc_prof - associate a VSI with a flow profile
|
|
* @hw: pointer to the hardware structure
|
|
* @blk: classification stage
|
|
* @prof: pointer to flow profile
|
|
* @vsi_handle: software VSI handle
|
|
*
|
|
* Assumption: the caller has acquired the lock to the profile list
|
|
* and the software VSI handle has been validated
|
|
*/
|
|
static int
|
|
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
|
|
struct ice_flow_prof *prof, u16 vsi_handle)
|
|
{
|
|
int status = 0;
|
|
|
|
if (!test_bit(vsi_handle, prof->vsis)) {
|
|
if (blk == ICE_BLK_ACL) {
|
|
status = ice_flow_acl_set_xtrct_seq(hw, prof);
|
|
if (status)
|
|
return status;
|
|
}
|
|
status = ice_add_prof_id_flow(hw, blk,
|
|
ice_get_hw_vsi_num(hw,
|
|
vsi_handle),
|
|
prof->id);
|
|
if (!status)
|
|
set_bit(vsi_handle, prof->vsis);
|
|
else
|
|
ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
|
|
status);
|
|
}
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_disassoc_prof - disassociate a VSI from a flow profile
|
|
* @hw: pointer to the hardware structure
|
|
* @blk: classification stage
|
|
* @prof: pointer to flow profile
|
|
* @vsi_handle: software VSI handle
|
|
*
|
|
* Assumption: the caller has acquired the lock to the profile list
|
|
* and the software VSI handle has been validated
|
|
*/
|
|
static int
|
|
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
|
|
struct ice_flow_prof *prof, u16 vsi_handle)
|
|
{
|
|
int status = 0;
|
|
|
|
if (test_bit(vsi_handle, prof->vsis)) {
|
|
status = ice_rem_prof_id_flow(hw, blk,
|
|
ice_get_hw_vsi_num(hw,
|
|
vsi_handle),
|
|
prof->id);
|
|
if (!status)
|
|
clear_bit(vsi_handle, prof->vsis);
|
|
else
|
|
ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
|
|
status);
|
|
}
|
|
|
|
return status;
|
|
}
|
|
|
|
#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13)
|
|
#define FLAG_GTP_EH_PDU BIT_ULL(14)
|
|
|
|
#define FLAG_GTPU_MSK \
|
|
(FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
|
|
#define FLAG_GTPU_UP \
|
|
(FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
|
|
#define FLAG_GTPU_DW \
|
|
(FLAG_GTP_EH_PDU)
|
|
/**
|
|
* ice_flow_set_hw_prof - Set HW flow profile based on the parsed profile info
|
|
* @hw: pointer to the HW struct
|
|
* @dest_vsi_handle: dest VSI handle
|
|
* @fdir_vsi_handle: fdir programming VSI handle
|
|
* @prof: stores parsed profile info from raw flow
|
|
* @blk: classification stage
|
|
*/
|
|
int
|
|
ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
|
|
u16 fdir_vsi_handle, struct ice_parser_profile *prof,
|
|
enum ice_block blk)
|
|
{
|
|
int id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
|
|
struct ice_flow_prof_params *params;
|
|
u8 fv_words = hw->blk[blk].es.fvw;
|
|
int status;
|
|
int i, idx;
|
|
|
|
params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params), GFP_KERNEL);
|
|
if (!params)
|
|
return -ENOMEM;
|
|
|
|
for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
|
|
params->es[i].prot_id = ICE_PROT_INVALID;
|
|
params->es[i].off = ICE_FV_OFFSET_INVAL;
|
|
}
|
|
|
|
for (i = 0; i < prof->fv_num; i++) {
|
|
if (hw->blk[blk].es.reverse)
|
|
idx = fv_words - i - 1;
|
|
else
|
|
idx = i;
|
|
params->es[idx].prot_id = prof->fv[i].proto_id;
|
|
params->es[idx].off = prof->fv[i].offset;
|
|
params->mask[idx] = (((prof->fv[i].msk) << 8) & 0xff00) |
|
|
(((prof->fv[i].msk) >> 8) & 0x00ff);
|
|
}
|
|
|
|
switch (prof->flags) {
|
|
case FLAG_GTPU_DW:
|
|
params->attr = ice_attr_gtpu_down;
|
|
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
|
|
break;
|
|
case FLAG_GTPU_UP:
|
|
params->attr = ice_attr_gtpu_up;
|
|
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
|
|
break;
|
|
default:
|
|
if (prof->flags_msk & FLAG_GTPU_MSK) {
|
|
params->attr = ice_attr_gtpu_session;
|
|
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
|
|
}
|
|
break;
|
|
}
|
|
|
|
status = ice_add_prof(hw, blk, id, prof->ptypes,
|
|
params->attr, params->attr_cnt,
|
|
params->es, params->mask, false);
|
|
if (status)
|
|
goto free_params;
|
|
|
|
status = ice_flow_assoc_hw_prof(hw, blk, dest_vsi_handle,
|
|
fdir_vsi_handle, id);
|
|
if (status)
|
|
goto free_params;
|
|
|
|
return 0;
|
|
|
|
free_params:
|
|
devm_kfree(ice_hw_to_dev(hw), params);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_add_prof - Add a flow profile for packet segments and matched fields
|
|
* @hw: pointer to the HW struct
|
|
* @blk: classification stage
|
|
* @dir: flow direction
|
|
* @prof_id: unique ID to identify this flow profile
|
|
* @segs: array of one or more packet segments that describe the flow
|
|
* @segs_cnt: number of packet segments provided
|
|
* @acts: array of default actions
|
|
* @acts_cnt: number of default actions
|
|
* @prof: stores the returned flow profile added
|
|
*/
|
|
int
|
|
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
|
|
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
|
|
struct ice_flow_action *acts, u8 acts_cnt,
|
|
struct ice_flow_prof **prof)
|
|
{
|
|
int status;
|
|
|
|
if (segs_cnt > ICE_FLOW_SEG_MAX)
|
|
return -ENOSPC;
|
|
|
|
if (!segs_cnt)
|
|
return -EINVAL;
|
|
|
|
if (!segs)
|
|
return -EINVAL;
|
|
|
|
status = ice_flow_val_hdrs(segs, segs_cnt);
|
|
if (status)
|
|
return status;
|
|
|
|
mutex_lock(&hw->fl_profs_locks[blk]);
|
|
|
|
status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
|
|
acts, acts_cnt, prof);
|
|
if (!status)
|
|
list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
|
|
|
|
mutex_unlock(&hw->fl_profs_locks[blk]);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_rem_prof - Remove a flow profile and all entries associated with it
|
|
* @hw: pointer to the HW struct
|
|
* @blk: the block for which the flow profile is to be removed
|
|
* @prof_id: unique ID of the flow profile to be removed
|
|
*/
|
|
int
|
|
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
|
|
{
|
|
struct ice_flow_prof *prof;
|
|
int status;
|
|
|
|
mutex_lock(&hw->fl_profs_locks[blk]);
|
|
|
|
prof = ice_flow_find_prof_id(hw, blk, prof_id);
|
|
if (!prof) {
|
|
status = -ENOENT;
|
|
goto out;
|
|
}
|
|
|
|
/* prof becomes invalid after the call */
|
|
status = ice_flow_rem_prof_sync(hw, blk, prof);
|
|
|
|
out:
|
|
mutex_unlock(&hw->fl_profs_locks[blk]);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
 * ice_flow_find_entry - look for a flow entry using its unique ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_id: unique ID to identify this flow entry
 *
 * This function looks for the flow entry with the specified unique ID in all
 * flow profiles of the specified classification stage. If the entry is found,
 * it returns the handle to the flow entry. Otherwise, it returns
 * ICE_FLOW_ENTRY_HANDLE_INVAL.
 */
u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
{
	struct ice_flow_entry *found = NULL;
	struct ice_flow_prof *p;

	mutex_lock(&hw->fl_profs_locks[blk]);

	list_for_each_entry(p, &hw->fl_profs[blk], l_entry) {
		struct ice_flow_entry *e;

		mutex_lock(&p->entries_lock);
		list_for_each_entry(e, &p->entries, l_entry)
			if (e->id == entry_id) {
				found = e;
				break;
			}
		mutex_unlock(&p->entries_lock);

		if (found)
			break;
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
}
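
/* Illustrative lookup (sketch only; entry_id is whatever unique ID the caller
 * supplied when the entry was added):
 *
 *	u64 hndl = ice_flow_find_entry(hw, ICE_BLK_ACL, entry_id);
 *
 *	if (hndl != ICE_FLOW_ENTRY_HANDLE_INVAL)
 *		;	// the entry exists; hndl refers to it
 */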
|
|
|
|
/**
|
|
* ice_flow_acl_check_actions - Checks the ACL rule's actions
|
|
* @hw: pointer to the hardware structure
|
|
* @acts: array of actions to be performed on a match
|
|
* @acts_cnt: number of actions
|
|
* @cnt_alloc: indicates if an ACL counter has been allocated.
|
|
*/
|
|
static int
|
|
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
|
|
u8 acts_cnt, bool *cnt_alloc)
|
|
{
|
|
DECLARE_BITMAP(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
|
|
int i;
|
|
|
|
bitmap_zero(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
|
|
*cnt_alloc = false;
|
|
|
|
if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
|
|
return -EIO;
|
|
|
|
for (i = 0; i < acts_cnt; i++) {
|
|
if (acts[i].type != ICE_FLOW_ACT_NOP &&
|
|
acts[i].type != ICE_FLOW_ACT_DROP &&
|
|
acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
|
|
acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
|
|
return -EIO;
|
|
|
|
/* If the caller want to add two actions of the same type, then
|
|
* it is considered invalid configuration.
|
|
*/
|
|
if (test_and_set_bit((u16)acts[i].type, dup_check))
|
|
return -EINVAL;
|
|
}
|
|
|
|
/* Checks if ACL counters are needed. */
|
|
for (i = 0; i < acts_cnt; i++) {
|
|
if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
|
|
acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
|
|
acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
|
|
struct ice_acl_cntrs cntrs = { 0 };
|
|
int status;
|
|
|
|
cntrs.amount = 1;
|
|
cntrs.bank = 0; /* Only bank0 for the moment */
|
|
|
|
if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
|
|
cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
|
|
else
|
|
cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
|
|
|
|
status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
|
|
if (status)
|
|
return status;
|
|
/* Counter index within the bank */
|
|
acts[i].data.acl_act.value =
|
|
cpu_to_le16(cntrs.first_cntr);
|
|
*cnt_alloc = true;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
|
|
* @fld: number of the given field
|
|
* @info: info about field
|
|
* @range_buf: range checker configuration buffer
|
|
* @data: pointer to a data buffer containing flow entry's match values/masks
|
|
* @range: Input/output param indicating which range checkers are being used
|
|
*/
|
|
static void
|
|
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
|
|
struct ice_aqc_acl_profile_ranges *range_buf,
|
|
u8 *data, u8 *range)
|
|
{
|
|
u16 new_mask;
|
|
|
|
/* If not specified, default mask is all bits in field */
|
|
new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
|
|
BIT(ice_flds_info[fld].size) - 1 :
|
|
(*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
|
|
|
|
/* If the mask is 0, then we don't need to worry about this input
|
|
* range checker value.
|
|
*/
|
|
if (new_mask) {
|
|
u16 new_high =
|
|
(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
|
|
u16 new_low =
|
|
(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
|
|
u8 range_idx = (u8)info->entry.val;
|
|
|
|
range_buf->checker_cfg[range_idx].low_boundary =
|
|
cpu_to_be16(new_low);
|
|
range_buf->checker_cfg[range_idx].high_boundary =
|
|
cpu_to_be16(new_high);
|
|
range_buf->checker_cfg[range_idx].mask = cpu_to_be16(new_mask);
|
|
|
|
/* Indicate which range checker is being used */
|
|
*range |= BIT(range_idx);
|
|
}
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
|
|
* @fld: number of the given field
|
|
* @info: info about the field
|
|
* @buf: buffer containing the entry
|
|
* @dontcare: buffer containing don't care mask for entry
|
|
* @data: pointer to a data buffer containing flow entry's match values/masks
|
|
*/
|
|
static void
|
|
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
|
|
u8 *dontcare, u8 *data)
|
|
{
|
|
u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
|
|
bool use_mask = false;
|
|
u8 disp;
|
|
|
|
src = info->src.val;
|
|
mask = info->src.mask;
|
|
dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
|
|
disp = info->xtrct.disp % BITS_PER_BYTE;
|
|
|
|
if (mask != ICE_FLOW_FLD_OFF_INVAL)
|
|
use_mask = true;
|
|
|
|
for (k = 0; k < info->entry.last; k++, dst++) {
|
|
/* Add overflow bits from previous byte */
|
|
buf[dst] = (tmp_s & 0xff00) >> 8;
|
|
|
|
/* If mask is not valid, tmp_m is always zero, so just setting
|
|
* dontcare to 0 (no masked bits). If mask is valid, pulls in
|
|
* overflow bits of mask from prev byte
|
|
*/
|
|
dontcare[dst] = (tmp_m & 0xff00) >> 8;
|
|
|
|
/* If there is displacement, last byte will only contain
|
|
* displaced data, but there is no more data to read from user
|
|
* buffer, so skip so as not to potentially read beyond end of
|
|
* user buffer
|
|
*/
|
|
if (!disp || k < info->entry.last - 1) {
|
|
/* Store shifted data to use in next byte */
|
|
tmp_s = data[src++] << disp;
|
|
|
|
/* Add current (shifted) byte */
|
|
buf[dst] |= tmp_s & 0xff;
|
|
|
|
/* Handle mask if valid */
|
|
if (use_mask) {
|
|
tmp_m = (~data[mask++] & 0xff) << disp;
|
|
dontcare[dst] |= tmp_m & 0xff;
|
|
}
|
|
}
|
|
}
|
|
|
|
/* Fill in don't care bits at beginning of field */
|
|
if (disp) {
|
|
dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
|
|
for (k = 0; k < disp; k++)
|
|
dontcare[dst] |= BIT(k);
|
|
}
|
|
|
|
end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
|
|
|
|
/* Fill in don't care bits at end of field */
|
|
if (end_disp) {
|
|
dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
|
|
info->entry.last - 1;
|
|
for (k = end_disp; k < BITS_PER_BYTE; k++)
|
|
dontcare[dst] |= BIT(k);
|
|
}
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_frmt_entry - Format ACL entry
|
|
* @hw: pointer to the hardware structure
|
|
* @prof: pointer to flow profile
|
|
* @e: pointer to the flow entry
|
|
* @data: pointer to a data buffer containing flow entry's match values/masks
|
|
* @acts: array of actions to be performed on a match
|
|
* @acts_cnt: number of actions
|
|
*
|
|
* Formats the key (and key_inverse) to be matched from the data passed in,
|
|
* along with data from the flow profile. This key/key_inverse pair makes up
|
|
* the 'entry' for an ACL flow entry.
|
|
*/
|
|
static int
|
|
ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
|
|
struct ice_flow_entry *e, u8 *data,
|
|
struct ice_flow_action *acts, u8 acts_cnt)
|
|
{
|
|
u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
|
|
struct ice_aqc_acl_profile_ranges *range_buf = NULL;
|
|
bool cnt_alloc;
|
|
u8 prof_id = 0;
|
|
u16 i, buf_sz;
|
|
int status;
|
|
|
|
status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
|
|
if (status)
|
|
return status;
|
|
|
|
/* Format the result action */
|
|
|
|
status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
|
|
if (status)
|
|
return status;
|
|
|
|
status = -ENOMEM;
|
|
|
|
e->acts = devm_kmemdup(ice_hw_to_dev(hw), acts,
|
|
acts_cnt * sizeof(*acts), GFP_KERNEL);
|
|
if (!e->acts)
|
|
goto out;
|
|
|
|
e->acts_cnt = acts_cnt;
|
|
|
|
/* Format the matching data */
|
|
buf_sz = prof->cfg.scen->width;
|
|
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_sz, GFP_KERNEL);
|
|
if (!buf)
|
|
goto out;
|
|
|
|
dontcare = devm_kzalloc(ice_hw_to_dev(hw), buf_sz, GFP_KERNEL);
|
|
if (!dontcare)
|
|
goto out;
|
|
|
|
/* 'key' buffer will store both key and key_inverse, so must be twice
|
|
* size of buf
|
|
*/
|
|
key = devm_kzalloc(ice_hw_to_dev(hw), buf_sz * 2, GFP_KERNEL);
|
|
if (!key)
|
|
goto out;
|
|
|
|
range_buf = devm_kzalloc(ice_hw_to_dev(hw),
|
|
sizeof(struct ice_aqc_acl_profile_ranges),
|
|
GFP_KERNEL);
|
|
if (!range_buf)
|
|
goto out;
|
|
|
|
/* Set don't care mask to all 1's to start, will zero out used bytes */
|
|
memset(dontcare, 0xff, buf_sz);
|
|
|
|
for (i = 0; i < prof->segs_cnt; i++) {
|
|
struct ice_flow_seg_info *seg = &prof->segs[i];
|
|
u16 j;
|
|
|
|
for_each_set_bit(j, (unsigned long *)&seg->match,
|
|
(u16)ICE_FLOW_FIELD_IDX_MAX) {
|
|
struct ice_flow_fld_info *info = &seg->fields[j];
|
|
|
|
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
|
|
ice_flow_acl_frmt_entry_range(j, info,
|
|
range_buf, data,
|
|
&range);
|
|
else
|
|
ice_flow_acl_frmt_entry_fld(j, info, buf,
|
|
dontcare, data);
|
|
}
|
|
|
|
for (j = 0; j < seg->raws_cnt; j++) {
|
|
struct ice_flow_fld_info *info = &seg->raws[j].info;
|
|
u16 dst, src, mask, k;
|
|
bool use_mask = false;
|
|
|
|
src = info->src.val;
|
|
dst = info->entry.val -
|
|
ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
|
|
mask = info->src.mask;
|
|
|
|
if (mask != ICE_FLOW_FLD_OFF_INVAL)
|
|
use_mask = true;
|
|
|
|
for (k = 0; k < info->entry.last; k++, dst++) {
|
|
buf[dst] = data[src++];
|
|
if (use_mask)
|
|
dontcare[dst] = ~data[mask++];
|
|
else
|
|
dontcare[dst] = 0;
|
|
}
|
|
}
|
|
}
|
|
|
|
buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
|
|
dontcare[prof->cfg.scen->pid_idx] = 0;
|
|
|
|
/* Format the buffer for direction flags */
|
|
dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
|
|
|
|
if (prof->dir == ICE_FLOW_RX)
|
|
buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
|
|
|
|
if (range) {
|
|
buf[prof->cfg.scen->rng_chk_idx] = range;
|
|
/* Mark any unused range checkers as don't care */
|
|
dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
|
|
e->range_buf = range_buf;
|
|
} else {
|
|
devm_kfree(ice_hw_to_dev(hw), range_buf);
|
|
}
|
|
|
|
status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
|
|
buf_sz);
|
|
if (status)
|
|
goto out;
|
|
|
|
e->entry = key;
|
|
e->entry_sz = buf_sz * 2;
|
|
|
|
out:
|
|
if (buf)
|
|
devm_kfree(ice_hw_to_dev(hw), buf);
|
|
|
|
if (dontcare)
|
|
devm_kfree(ice_hw_to_dev(hw), dontcare);
|
|
|
|
if (status && key)
|
|
devm_kfree(ice_hw_to_dev(hw), key);
|
|
|
|
if (status && range_buf) {
|
|
devm_kfree(ice_hw_to_dev(hw), range_buf);
|
|
e->range_buf = NULL;
|
|
}
|
|
|
|
if (status && e->acts) {
|
|
devm_kfree(ice_hw_to_dev(hw), e->acts);
|
|
e->acts = NULL;
|
|
e->acts_cnt = 0;
|
|
}
|
|
|
|
if (status && cnt_alloc)
|
|
ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
 *	the compared data.
 * @prof: pointer to flow profile
 * @e: pointer to the comparing flow entry
 * @do_chg_action: decide if we want to change the ACL action
 * @do_add_entry: decide if we want to add the new ACL entry
 * @do_rem_entry: decide if we want to remove the current ACL entry
 *
 * Find an ACL scenario entry that matches the compared data. At the same time,
 * this function also figures out:
 * a/ If we want to change the ACL action
 * b/ If we want to add the new ACL entry
 * c/ If we want to remove the current ACL entry
 */
|
|
static struct ice_flow_entry *
|
|
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
|
|
struct ice_flow_entry *e, bool *do_chg_action,
|
|
bool *do_add_entry, bool *do_rem_entry)
|
|
{
|
|
struct ice_flow_entry *p, *return_entry = NULL;
|
|
u8 i, j;
|
|
|
|
/* Check if:
|
|
* a/ There exists an entry with same matching data, but different
|
|
* priority, then we remove this existing ACL entry. Then, we
|
|
* will add the new entry to the ACL scenario.
|
|
* b/ There exists an entry with same matching data, priority, and
|
|
* result action, then we do nothing
|
|
* c/ There exists an entry with same matching data, priority, but
|
|
* different, action, then do only change the action's entry.
|
|
* d/ Else, we add this new entry to the ACL scenario.
|
|
*/
|
|
*do_chg_action = false;
|
|
*do_add_entry = true;
|
|
*do_rem_entry = false;
|
|
list_for_each_entry(p, &prof->entries, l_entry) {
|
|
if (memcmp(p->entry, e->entry, p->entry_sz))
|
|
continue;
|
|
|
|
/* From this point, we have the same matching_data. */
|
|
*do_add_entry = false;
|
|
return_entry = p;
|
|
|
|
if (p->priority != e->priority) {
|
|
/* matching data && !priority */
|
|
*do_add_entry = true;
|
|
*do_rem_entry = true;
|
|
break;
|
|
}
|
|
|
|
/* From this point, we will have matching_data && priority */
|
|
if (p->acts_cnt != e->acts_cnt)
|
|
*do_chg_action = true;
|
|
for (i = 0; i < p->acts_cnt; i++) {
|
|
bool found_not_match = false;
|
|
|
|
for (j = 0; j < e->acts_cnt; j++)
|
|
if (memcmp(&p->acts[i], &e->acts[j],
|
|
sizeof(struct ice_flow_action))) {
|
|
found_not_match = true;
|
|
break;
|
|
}
|
|
|
|
if (found_not_match) {
|
|
*do_chg_action = true;
|
|
break;
|
|
}
|
|
}
|
|
|
|
/* (do_chg_action = true) means :
|
|
* matching_data && priority && !result_action
|
|
* (do_chg_action = false) means :
|
|
* matching_data && priority && result_action
|
|
*/
|
|
break;
|
|
}
|
|
|
|
return return_entry;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
|
|
* @p: flow priority
|
|
*/
|
|
static enum ice_acl_entry_prio
|
|
ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
|
|
{
|
|
enum ice_acl_entry_prio acl_prio;
|
|
|
|
switch (p) {
|
|
case ICE_FLOW_PRIO_LOW:
|
|
acl_prio = ICE_ACL_PRIO_LOW;
|
|
break;
|
|
case ICE_FLOW_PRIO_NORMAL:
|
|
acl_prio = ICE_ACL_PRIO_NORMAL;
|
|
break;
|
|
case ICE_FLOW_PRIO_HIGH:
|
|
acl_prio = ICE_ACL_PRIO_HIGH;
|
|
break;
|
|
default:
|
|
acl_prio = ICE_ACL_PRIO_NORMAL;
|
|
break;
|
|
}
|
|
|
|
return acl_prio;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_union_rng_chk - Perform union operation between two
|
|
* range-range checker buffers
|
|
* @dst_buf: pointer to destination range checker buffer
|
|
* @src_buf: pointer to source range checker buffer
|
|
*
|
|
* For this function, we do the union between dst_buf and src_buf
|
|
* range checker buffer, and we will save the result back to dst_buf
|
|
*/
|
|
static int
|
|
ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
|
|
struct ice_aqc_acl_profile_ranges *src_buf)
|
|
{
|
|
u8 i, j;
|
|
|
|
if (!dst_buf || !src_buf)
|
|
return -EINVAL;
|
|
|
|
for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
|
|
struct ice_acl_rng_data *cfg_data = NULL, *in_data;
|
|
bool will_populate = false;
|
|
|
|
in_data = &src_buf->checker_cfg[i];
|
|
|
|
if (!in_data->mask)
|
|
break;
|
|
|
|
for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
|
|
cfg_data = &dst_buf->checker_cfg[j];
|
|
|
|
if (!cfg_data->mask ||
|
|
!memcmp(cfg_data, in_data,
|
|
sizeof(struct ice_acl_rng_data))) {
|
|
will_populate = true;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (will_populate) {
|
|
memcpy(cfg_data, in_data,
|
|
sizeof(struct ice_acl_rng_data));
|
|
} else {
|
|
/* No available slot left to program range checker */
|
|
return -ENOSPC;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
|
|
* @hw: pointer to the hardware structure
|
|
* @prof: pointer to flow profile
|
|
* @entry: double pointer to the flow entry
|
|
*
|
|
* For this function, we will look at the current added entries in the
|
|
* corresponding ACL scenario. Then, we will perform matching logic to
|
|
* see if we want to add/modify/do nothing with this new entry.
|
|
*/
|
|
static int
|
|
ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
|
|
struct ice_flow_entry **entry)
|
|
{
|
|
bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
|
|
struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
|
|
struct ice_acl_act_entry *acts = NULL;
|
|
struct ice_flow_entry *exist;
|
|
struct ice_flow_entry *e;
|
|
int status = 0;
|
|
u8 i;
|
|
|
|
if (!entry || !(*entry) || !prof)
|
|
return -EINVAL;
|
|
|
|
e = *entry;
|
|
|
|
do_chg_rng_chk = false;
|
|
if (e->range_buf) {
|
|
u8 prof_id = 0;
|
|
|
|
status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
|
|
&prof_id);
|
|
if (status)
|
|
return status;
|
|
|
|
/* Query the current range-checker value in FW */
|
|
status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
|
|
NULL);
|
|
if (status)
|
|
return status;
|
|
memcpy(&cfg_rng_buf, &query_rng_buf,
|
|
sizeof(struct ice_aqc_acl_profile_ranges));
|
|
|
|
/* Generate the new range-checker value */
|
|
status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
|
|
if (status)
|
|
return status;
|
|
|
|
/* Reconfigure the range check if the buffer is changed. */
|
|
do_chg_rng_chk = false;
|
|
if (memcmp(&query_rng_buf, &cfg_rng_buf,
|
|
sizeof(struct ice_aqc_acl_profile_ranges))) {
|
|
status = ice_prog_acl_prof_ranges(hw, prof_id,
|
|
&cfg_rng_buf, NULL);
|
|
if (status)
|
|
return status;
|
|
|
|
do_chg_rng_chk = true;
|
|
}
|
|
}
|
|
|
|
/* Figure out if we want to (change the ACL action) and/or
|
|
* (Add the new ACL entry) and/or (Remove the current ACL entry)
|
|
*/
|
|
exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
|
|
&do_add_entry, &do_rem_entry);
|
|
if (do_rem_entry) {
|
|
status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
|
|
if (status)
|
|
return status;
|
|
}
|
|
|
|
/* Prepare the result action buffer */
|
|
acts = devm_kcalloc(ice_hw_to_dev(hw), e->entry_sz,
|
|
sizeof(struct ice_acl_act_entry), GFP_KERNEL);
|
|
if (!acts)
|
|
return -ENOMEM;
|
|
|
|
for (i = 0; i < e->acts_cnt; i++)
|
|
memcpy(&acts[i], &e->acts[i].data.acl_act,
|
|
sizeof(struct ice_acl_act_entry));
|
|
|
|
if (do_add_entry) {
|
|
enum ice_acl_entry_prio prio;
|
|
u8 *keys, *inverts;
|
|
u16 entry_idx;
|
|
|
|
keys = (u8 *)e->entry;
|
|
inverts = keys + (e->entry_sz / 2);
|
|
prio = ice_flow_acl_convert_to_acl_prio(e->priority);
|
|
|
|
status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
|
|
inverts, acts, e->acts_cnt,
|
|
&entry_idx);
|
|
if (status)
|
|
goto out;
|
|
|
|
e->scen_entry_idx = entry_idx;
|
|
list_add(&e->l_entry, &prof->entries);
|
|
} else {
|
|
if (do_chg_action) {
|
|
/* For the action memory info, update the SW's copy of
|
|
* exist entry with e's action memory info
|
|
*/
|
|
devm_kfree(ice_hw_to_dev(hw), exist->acts);
|
|
exist->acts_cnt = e->acts_cnt;
|
|
exist->acts = devm_kcalloc(ice_hw_to_dev(hw),
|
|
exist->acts_cnt,
|
|
sizeof(struct ice_flow_action),
|
|
GFP_KERNEL);
|
|
if (!exist->acts) {
|
|
status = -ENOMEM;
|
|
goto out;
|
|
}
|
|
|
|
memcpy(exist->acts, e->acts,
|
|
sizeof(struct ice_flow_action) * e->acts_cnt);
|
|
|
|
status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
|
|
e->acts_cnt,
|
|
exist->scen_entry_idx);
|
|
if (status)
|
|
goto out;
|
|
}
|
|
|
|
if (do_chg_rng_chk) {
|
|
/* In this case, we want to update the range checker
|
|
* information of the exist entry
|
|
*/
|
|
status = ice_flow_acl_union_rng_chk(exist->range_buf,
|
|
e->range_buf);
|
|
if (status)
|
|
goto out;
|
|
}
|
|
|
|
/* As we don't add the new entry to our SW DB, deallocate its
|
|
* memories, and return the exist entry to the caller
|
|
*/
|
|
ice_dealloc_flow_entry(hw, e);
|
|
*(entry) = exist;
|
|
}
|
|
out:
|
|
devm_kfree(ice_hw_to_dev(hw), acts);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_acl_add_scen_entry - Add entry to ACL scenario
|
|
* @hw: pointer to the hardware structure
|
|
* @prof: pointer to flow profile
|
|
* @e: double pointer to the flow entry
|
|
*/
|
|
static int
|
|
ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
|
|
struct ice_flow_entry **e)
|
|
{
|
|
int status;
|
|
|
|
mutex_lock(&prof->entries_lock);
|
|
status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
|
|
mutex_unlock(&prof->entries_lock);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_add_entry - Add a flow entry
|
|
* @hw: pointer to the HW struct
|
|
* @blk: classification stage
|
|
* @prof_id: ID of the profile to add a new flow entry to
|
|
* @entry_id: unique ID to identify this flow entry
|
|
* @vsi_handle: software VSI handle for the flow entry
|
|
* @prio: priority of the flow entry
|
|
* @data: pointer to a data buffer containing flow entry's match values/masks
|
|
* @acts: arrays of actions to be performed on a match
|
|
* @acts_cnt: number of actions
|
|
* @entry_h: pointer to buffer that receives the new flow entry's handle
|
|
*/
|
|
int
|
|
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
|
|
u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
|
|
void *data, struct ice_flow_action *acts, u8 acts_cnt,
|
|
u64 *entry_h)
|
|
{
|
|
struct ice_flow_entry *e = NULL;
|
|
struct ice_flow_prof *prof;
|
|
int status = 0;
|
|
|
|
/* ACL entries must indicate an action */
|
|
if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
|
|
return -EINVAL;
|
|
|
|
/* No flow entry data is expected for RSS */
|
|
if (!entry_h || (!data && blk != ICE_BLK_RSS))
|
|
return -EINVAL;
|
|
|
|
if (!ice_is_vsi_valid(hw, vsi_handle))
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&hw->fl_profs_locks[blk]);
|
|
|
|
prof = ice_flow_find_prof_id(hw, blk, prof_id);
|
|
if (!prof) {
|
|
status = -ENOENT;
|
|
} else {
|
|
/* Allocate memory for the entry being added and associate
|
|
* the VSI to the found flow profile
|
|
*/
|
|
e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
|
|
if (!e)
|
|
status = -ENOMEM;
|
|
else
|
|
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
|
|
}
|
|
|
|
mutex_unlock(&hw->fl_profs_locks[blk]);
|
|
if (status)
|
|
goto out;
|
|
|
|
e->id = entry_id;
|
|
e->vsi_handle = vsi_handle;
|
|
e->prof = prof;
|
|
e->priority = prio;
|
|
|
|
switch (blk) {
|
|
case ICE_BLK_FD:
|
|
case ICE_BLK_RSS:
|
|
break;
|
|
case ICE_BLK_ACL:
|
|
/* ACL will handle the entry management */
|
|
if (ice_dcf_is_acl_capable(hw)) {
|
|
status = -EBUSY;
|
|
goto out;
|
|
}
|
|
status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
|
|
acts_cnt);
|
|
if (status)
|
|
goto out;
|
|
|
|
status = ice_flow_acl_add_scen_entry(hw, prof, &e);
|
|
if (status)
|
|
goto out;
|
|
|
|
break;
|
|
default:
|
|
status = -EOPNOTSUPP;
|
|
goto out;
|
|
}
|
|
|
|
if (blk != ICE_BLK_ACL) {
|
|
/* ACL will handle the entry management */
|
|
mutex_lock(&prof->entries_lock);
|
|
list_add(&e->l_entry, &prof->entries);
|
|
mutex_unlock(&prof->entries_lock);
|
|
}
|
|
|
|
*entry_h = ICE_FLOW_ENTRY_HNDL(e);
|
|
|
|
out:
|
|
if (status && e) {
|
|
if (e->entry)
|
|
devm_kfree(ice_hw_to_dev(hw), e->entry);
|
|
devm_kfree(ice_hw_to_dev(hw), e);
|
|
}
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_rem_entry - Remove a flow entry
|
|
* @hw: pointer to the HW struct
|
|
* @blk: classification stage
|
|
* @entry_h: handle to the flow entry to be removed
|
|
*/
|
|
int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
|
|
u64 entry_h)
|
|
{
|
|
struct ice_flow_entry *entry;
|
|
struct ice_flow_prof *prof;
|
|
int status = 0;
|
|
|
|
if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
|
|
return -EINVAL;
|
|
|
|
entry = ICE_FLOW_ENTRY_PTR(entry_h);
|
|
|
|
/* Retain the pointer to the flow profile as the entry will be freed */
|
|
prof = entry->prof;
|
|
|
|
if (prof) {
|
|
mutex_lock(&prof->entries_lock);
|
|
status = ice_flow_rem_entry_sync(hw, blk, entry);
|
|
mutex_unlock(&prof->entries_lock);
|
|
}
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
|
|
* @seg: packet segment the field being set belongs to
|
|
* @fld: field to be set
|
|
* @field_type: type of the field
|
|
* @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
|
|
* entry's input buffer
|
|
* @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
|
|
* input buffer
|
|
* @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
|
|
* entry's input buffer
|
|
*
|
|
* This helper function stores information of a field being matched, including
|
|
* the type of the field and the locations of the value to match, the mask, and
|
|
* the upper-bound value in the start of the input buffer for a flow entry.
|
|
* This function should only be used for fixed-size data structures.
|
|
*
|
|
* This function also opportunistically determines the protocol headers to be
|
|
* present based on the fields being set. Some fields cannot be used alone to
|
|
* determine the protocol headers present. Sometimes, fields for particular
|
|
* protocol headers are not matched. In those cases, the protocol headers
|
|
* must be explicitly set.
|
|
*/
|
|
static void
|
|
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
|
|
enum ice_flow_fld_match_type field_type, u16 val_loc,
|
|
u16 mask_loc, u16 last_loc)
|
|
{
|
|
u64 bit = BIT_ULL(fld);
|
|
|
|
seg->match |= bit;
|
|
if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
|
|
seg->range |= bit;
|
|
|
|
seg->fields[fld].type = field_type;
|
|
seg->fields[fld].src.val = val_loc;
|
|
seg->fields[fld].src.mask = mask_loc;
|
|
seg->fields[fld].src.last = last_loc;
|
|
|
|
ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
|
|
}
|
|
|
|
/**
|
|
* ice_flow_set_fld - specifies locations of field from entry's input buffer
|
|
* @seg: packet segment the field being set belongs to
|
|
* @fld: field to be set
|
|
* @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
|
|
* entry's input buffer
|
|
* @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
|
|
* input buffer
|
|
* @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
|
|
* entry's input buffer
|
|
* @range: indicate if field being matched is to be in a range
|
|
*
|
|
* This function specifies the locations, in the form of byte offsets from the
|
|
* start of the input buffer for a flow entry, from where the value to match,
|
|
* the mask value, and upper value can be extracted. These locations are then
|
|
* stored in the flow profile. When adding a flow entry associated with the
|
|
* flow profile, these locations will be used to quickly extract the values and
|
|
* create the content of a match entry. This function should only be used for
|
|
* fixed-size data structures.
|
|
*/
|
|
void
|
|
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
|
|
u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
|
|
{
|
|
enum ice_flow_fld_match_type t = range ?
|
|
ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
|
|
|
|
ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
|
|
}
|
|
|
|
/**
|
|
* ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
|
|
* @seg: packet segment the field being set belongs to
|
|
* @fld: field to be set
|
|
* @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
|
|
* entry's input buffer
|
|
* @pref_loc: location of prefix value from entry's input buffer
|
|
* @pref_sz: size of the location holding the prefix value
|
|
*
|
|
* This function specifies the locations, in the form of byte offsets from the
|
|
* start of the input buffer for a flow entry, from where the value to match
|
|
* and the IPv4 prefix value can be extracted. These locations are then stored
|
|
* in the flow profile. When adding flow entries to the associated flow profile,
|
|
* these locations can be used to quickly extract the values to create the
|
|
* content of a match entry. This function should only be used for fixed-size
|
|
* data structures.
|
|
*/
|
|
void
|
|
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
|
|
u16 val_loc, u16 pref_loc, u8 pref_sz)
|
|
{
|
|
/* For this type of field, the "mask" location is for the prefix value's
|
|
* location and the "last" location is for the size of the location of
|
|
* the prefix value.
|
|
*/
|
|
ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
|
|
pref_loc, (u16)pref_sz);
|
|
}
|
|
|
|
/**
|
|
* ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
|
|
* @seg: packet segment the field being set belongs to
|
|
* @off: offset of the raw field from the beginning of the segment in bytes
|
|
* @len: length of the raw pattern to be matched
|
|
* @val_loc: location of the value to match from entry's input buffer
|
|
* @mask_loc: location of mask value from entry's input buffer
|
|
*
|
|
* This function specifies the offset of the raw field to be match from the
|
|
* beginning of the specified packet segment, and the locations, in the form of
|
|
* byte offsets from the start of the input buffer for a flow entry, from where
|
|
* the value to match and the mask value to be extracted. These locations are
|
|
* then stored in the flow profile. When adding flow entries to the associated
|
|
* flow profile, these locations can be used to quickly extract the values to
|
|
* create the content of a match entry. This function should only be used for
|
|
* fixed-size data structures.
|
|
*/
|
|
void
|
|
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
|
|
u16 val_loc, u16 mask_loc)
|
|
{
|
|
if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
|
|
seg->raws[seg->raws_cnt].off = off;
|
|
seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
|
|
seg->raws[seg->raws_cnt].info.src.val = val_loc;
|
|
seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
|
|
/* The "last" field is used to store the length of the field */
|
|
seg->raws[seg->raws_cnt].info.src.last = len;
|
|
}
|
|
|
|
/* Overflows of "raws" will be handled as an error condition later in
|
|
* the flow when this information is processed.
|
|
*/
|
|
seg->raws_cnt++;
|
|
}
|
|
|
|
/**
|
|
* ice_flow_rem_vsi_prof - remove vsi from flow profile
|
|
* @hw: pointer to the hardware structure
|
|
* @blk: classification stage
|
|
* @vsi_handle: software VSI handle
|
|
* @prof_id: unique ID to identify this flow profile
|
|
*
|
|
* This function removes the flow entries associated to the input
|
|
* vsi handle and disassociates the vsi from the flow profile.
|
|
*/
|
|
int ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
|
|
u64 prof_id)
|
|
{
|
|
struct ice_flow_prof *prof = NULL;
|
|
int status = 0;
|
|
|
|
if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
|
|
return -EINVAL;
|
|
|
|
/* find flow profile pointer with input package block and profile id */
|
|
prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
|
|
if (!prof) {
|
|
ice_debug(hw, ICE_DBG_PKG,
|
|
"Cannot find flow profile id=%llu\n", prof_id);
|
|
return -ENOENT;
|
|
}
|
|
|
|
/* Remove all remaining flow entries before removing the flow profile */
|
|
if (!list_empty(&prof->entries)) {
|
|
struct ice_flow_entry *e, *t;
|
|
|
|
mutex_lock(&prof->entries_lock);
|
|
list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
|
|
if (e->vsi_handle != vsi_handle)
|
|
continue;
|
|
|
|
status = ice_flow_rem_entry_sync(hw, blk, e);
|
|
if (status)
|
|
break;
|
|
}
|
|
mutex_unlock(&prof->entries_lock);
|
|
}
|
|
if (status)
|
|
return status;
|
|
|
|
/* disassociate the flow profile from sw vsi handle */
|
|
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
|
|
if (status)
|
|
ice_debug(hw, ICE_DBG_PKG,
|
|
"ice_flow_disassoc_prof() failed with status=%d\n",
|
|
status);
|
|
return status;
|
|
}
|
|
|
|
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
|
|
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
|
|
|
|
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
|
|
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
|
|
|
|
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
|
|
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
|
|
|
|
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
|
|
(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
|
|
ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
|
|
ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
|
|
|
|
/**
|
|
* ice_flow_set_rss_seg_info - setup packet segments for RSS
|
|
* @segs: pointer to the flow field segment(s)
|
|
* @seg_cnt: segment count
|
|
* @cfg: configure parameters
|
|
*
|
|
* Helper function to extract fields from hash bitmap and use flow
|
|
* header value to set flow field segment for further use in flow
|
|
* profile entry or removal.
|
|
*/
|
|
static int
|
|
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
|
|
const struct ice_rss_hash_cfg *cfg)
|
|
{
|
|
struct ice_flow_seg_info *seg;
|
|
u64 val;
|
|
u16 i;
|
|
|
|
/* set inner most segment */
|
|
seg = &segs[seg_cnt - 1];
|
|
|
|
for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds,
|
|
(u16)ICE_FLOW_FIELD_IDX_MAX)
|
|
ice_flow_set_fld(seg, (enum ice_flow_field)i,
|
|
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
|
|
ICE_FLOW_FLD_OFF_INVAL, false);
|
|
|
|
ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
|
|
|
|
/* set outer most header */
|
|
if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
|
|
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
|
|
ICE_FLOW_SEG_HDR_IPV_FRAG |
|
|
ICE_FLOW_SEG_HDR_IPV_OTHER;
|
|
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
|
|
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
|
|
ICE_FLOW_SEG_HDR_IPV_FRAG |
|
|
ICE_FLOW_SEG_HDR_IPV_OTHER;
|
|
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
|
|
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
|
|
ICE_FLOW_SEG_HDR_GRE |
|
|
ICE_FLOW_SEG_HDR_IPV_OTHER;
|
|
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
|
|
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
|
|
ICE_FLOW_SEG_HDR_GRE |
|
|
ICE_FLOW_SEG_HDR_IPV_OTHER;
|
|
|
|
if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
|
|
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
|
|
~ICE_FLOW_SEG_HDR_IPV_FRAG)
|
|
return -EINVAL;
|
|
|
|
val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
|
|
if (val && !is_power_of_2(val))
|
|
return -EIO;
|
|
|
|
val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
|
|
if (val && !is_power_of_2(val))
|
|
return -EIO;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* ice_rem_vsi_rss_list - remove VSI from RSS list
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
*
|
|
* Remove the VSI from all RSS configurations in the list.
|
|
*/
|
|
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
|
|
{
|
|
struct ice_rss_cfg *r, *tmp;
|
|
|
|
if (list_empty(&hw->rss_list_head))
|
|
return;
|
|
|
|
mutex_lock(&hw->rss_locks);
|
|
list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
|
|
if (test_and_clear_bit(vsi_handle, r->vsis))
|
|
if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
|
|
list_del(&r->l_entry);
|
|
devm_kfree(ice_hw_to_dev(hw), r);
|
|
}
|
|
mutex_unlock(&hw->rss_locks);
|
|
}
|
|
|
|
/**
|
|
* ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
*
|
|
* This function will iterate through all flow profiles and disassociate
|
|
* the VSI from that profile. If the flow profile has no VSIs it will
|
|
* be removed.
|
|
*/
|
|
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
|
|
{
|
|
const enum ice_block blk = ICE_BLK_RSS;
|
|
struct ice_flow_prof *p, *t;
|
|
int status = 0;
|
|
|
|
if (!ice_is_vsi_valid(hw, vsi_handle))
|
|
return -EINVAL;
|
|
|
|
if (list_empty(&hw->fl_profs[blk]))
|
|
return 0;
|
|
|
|
mutex_lock(&hw->rss_locks);
|
|
list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
|
|
if (test_bit(vsi_handle, p->vsis)) {
|
|
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
|
|
if (status)
|
|
break;
|
|
|
|
if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
|
|
status = ice_flow_rem_prof(hw, blk, p->id);
|
|
if (status)
|
|
break;
|
|
}
|
|
}
|
|
mutex_unlock(&hw->rss_locks);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_get_rss_hdr_type - get a RSS profile's header type
|
|
* @prof: RSS flow profile
|
|
*/
|
|
static enum ice_rss_cfg_hdr_type
|
|
ice_get_rss_hdr_type(struct ice_flow_prof *prof)
|
|
{
|
|
enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
|
|
|
|
if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
|
|
hdr_type = ICE_RSS_OUTER_HEADERS;
|
|
} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
|
|
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
|
|
hdr_type = ICE_RSS_INNER_HEADERS;
|
|
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
|
|
hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
|
|
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
|
|
hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
|
|
}
|
|
|
|
return hdr_type;
|
|
}
|
|
|
|
/**
|
|
* ice_rem_rss_list - remove RSS configuration from list
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
* @prof: pointer to flow profile
|
|
*
|
|
* Assumption: lock has already been acquired for RSS list
|
|
*/
|
|
static void
|
|
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
|
|
{
|
|
enum ice_rss_cfg_hdr_type hdr_type;
|
|
struct ice_rss_cfg *r, *tmp;
|
|
|
|
/* Search for RSS hash fields associated to the VSI that match the
|
|
* hash configurations associated to the flow profile. If found
|
|
* remove from the RSS entry list of the VSI context and delete entry.
|
|
*/
|
|
hdr_type = ice_get_rss_hdr_type(prof);
|
|
list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
|
|
if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
|
|
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
|
|
r->hash.hdr_type == hdr_type) {
|
|
clear_bit(vsi_handle, r->vsis);
|
|
if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
|
|
list_del(&r->l_entry);
|
|
devm_kfree(ice_hw_to_dev(hw), r);
|
|
}
|
|
return;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* ice_add_rss_list - add RSS configuration to list
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
* @prof: pointer to flow profile
|
|
*
|
|
* Assumption: lock has already been acquired for RSS list
|
|
*/
|
|
static int
|
|
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
|
|
{
|
|
enum ice_rss_cfg_hdr_type hdr_type;
|
|
struct ice_rss_cfg *r, *rss_cfg;
|
|
|
|
hdr_type = ice_get_rss_hdr_type(prof);
|
|
list_for_each_entry(r, &hw->rss_list_head, l_entry)
|
|
if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
|
|
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
|
|
r->hash.hdr_type == hdr_type) {
|
|
set_bit(vsi_handle, r->vsis);
|
|
return 0;
|
|
}
|
|
|
|
rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
|
|
GFP_KERNEL);
|
|
if (!rss_cfg)
|
|
return -ENOMEM;
|
|
|
|
rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
|
|
rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
|
|
rss_cfg->hash.hdr_type = hdr_type;
|
|
rss_cfg->hash.symm = prof->cfg.symm;
|
|
set_bit(vsi_handle, rss_cfg->vsis);
|
|
|
|
list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
|
|
|
|
return 0;
|
|
}
|
|
|
|
#define ICE_FLOW_PROF_HASH_S 0
|
|
#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
|
|
#define ICE_FLOW_PROF_HDR_S 32
|
|
#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
|
|
#define ICE_FLOW_PROF_ENCAP_S 62
|
|
#define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
|
|
|
|
/* Flow profile ID format:
|
|
* [0:31] - Packet match fields
|
|
* [32:61] - Protocol header
|
|
* [62:63] - Encapsulation flag:
|
|
* 0 if non-tunneled
|
|
* 1 if tunneled
|
|
* 2 for tunneled with outer ipv4
|
|
* 3 for tunneled with outer ipv6
|
|
*/
|
|
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
|
|
((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
|
|
(((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
|
|
(((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
|
|
|
|
static void
|
|
ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
|
|
{
|
|
u32 s = ((src % 4) << 3); /* byte shift */
|
|
u32 v = dst | 0x80; /* value to program */
|
|
u8 i = src / 4; /* register index */
|
|
u32 reg;
|
|
|
|
reg = rd32(hw, GLQF_HSYMM(prof_id, i));
|
|
reg = (reg & ~(0xff << s)) | (v << s);
|
|
wr32(hw, GLQF_HSYMM(prof_id, i), reg);
|
|
}
|
|
|
|
static void
|
|
ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
|
|
{
|
|
int fv_last_word =
|
|
ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
|
|
int i;
|
|
|
|
for (i = 0; i < len; i++) {
|
|
ice_rss_config_xor_word(hw, prof_id,
|
|
/* Yes, field vector in GLQF_HSYMM and
|
|
* GLQF_HINSET is inversed!
|
|
*/
|
|
fv_last_word - (src + i),
|
|
fv_last_word - (dst + i));
|
|
ice_rss_config_xor_word(hw, prof_id,
|
|
fv_last_word - (dst + i),
|
|
fv_last_word - (src + i));
|
|
}
|
|
}
|
|
|
|
static void
|
|
ice_rss_update_symm(struct ice_hw *hw,
|
|
struct ice_flow_prof *prof)
|
|
{
|
|
struct ice_prof_map *map;
|
|
u8 prof_id, m;
|
|
|
|
mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
|
|
map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
|
|
if (map)
|
|
prof_id = map->prof_id;
|
|
mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
|
|
if (!map)
|
|
return;
|
|
/* clear to default */
|
|
for (m = 0; m < 6; m++)
|
|
wr32(hw, GLQF_HSYMM(prof_id, m), 0);
|
|
if (prof->cfg.symm) {
|
|
struct ice_flow_seg_info *seg =
|
|
&prof->segs[prof->segs_cnt - 1];
|
|
|
|
struct ice_flow_seg_xtrct *ipv4_src =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
|
|
struct ice_flow_seg_xtrct *ipv4_dst =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
|
|
struct ice_flow_seg_xtrct *ipv6_src =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
|
|
struct ice_flow_seg_xtrct *ipv6_dst =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
|
|
|
|
struct ice_flow_seg_xtrct *tcp_src =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
|
|
struct ice_flow_seg_xtrct *tcp_dst =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
|
|
|
|
struct ice_flow_seg_xtrct *udp_src =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
|
|
struct ice_flow_seg_xtrct *udp_dst =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
|
|
|
|
struct ice_flow_seg_xtrct *sctp_src =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
|
|
struct ice_flow_seg_xtrct *sctp_dst =
|
|
&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
|
|
|
|
/* xor IPv4 */
|
|
if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
|
|
ice_rss_config_xor(hw, prof_id,
|
|
ipv4_src->idx, ipv4_dst->idx, 2);
|
|
|
|
/* xor IPv6 */
|
|
if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
|
|
ice_rss_config_xor(hw, prof_id,
|
|
ipv6_src->idx, ipv6_dst->idx, 8);
|
|
|
|
/* xor TCP */
|
|
if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
|
|
ice_rss_config_xor(hw, prof_id,
|
|
tcp_src->idx, tcp_dst->idx, 1);
|
|
|
|
/* xor UDP */
|
|
if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
|
|
ice_rss_config_xor(hw, prof_id,
|
|
udp_src->idx, udp_dst->idx, 1);
|
|
|
|
/* xor SCTP */
|
|
if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
|
|
ice_rss_config_xor(hw, prof_id,
|
|
sctp_src->idx, sctp_dst->idx, 1);
|
|
}
|
|
}
|
|
|
|
/**
|
|
* ice_rss_cfg_raw_symm - configure symmetric hash parameters
|
|
* for raw pattern
|
|
* @hw: pointer to the hardware structure
|
|
* @prof: pointer to parser profile
|
|
* @prof_id: profile ID
|
|
*
|
|
* Calculate symmetric hash parameters based on input protocol type.
|
|
*/
|
|
static void
|
|
ice_rss_cfg_raw_symm(struct ice_hw *hw,
|
|
struct ice_parser_profile *prof, u64 prof_id)
|
|
{
|
|
u8 src_idx, dst_idx, proto_id;
|
|
int len, i = 0;
|
|
|
|
while (i < prof->fv_num) {
|
|
proto_id = prof->fv[i].proto_id;
|
|
|
|
switch (proto_id) {
|
|
case ICE_PROT_IPV4_OF_OR_S:
|
|
case ICE_PROT_IPV4_IL:
|
|
case ICE_PROT_IPV4_IL_IL:
|
|
len = ICE_FLOW_FLD_SZ_IPV4_ADDR /
|
|
ICE_FLOW_FV_EXTRACT_SZ;
|
|
if (prof->fv[i].offset ==
|
|
ICE_FLOW_FIELD_IPV4_SRC_OFFSET &&
|
|
prof->fv[i + len].proto_id == proto_id &&
|
|
prof->fv[i + len].offset ==
|
|
ICE_FLOW_FIELD_IPV4_DST_OFFSET) {
|
|
src_idx = i;
|
|
dst_idx = i + len;
|
|
i += 2 * len;
|
|
break;
|
|
}
|
|
i++;
|
|
continue;
|
|
case ICE_PROT_IPV6_OF_OR_S:
|
|
case ICE_PROT_IPV6_IL:
|
|
case ICE_PROT_IPV6_IL_IL:
|
|
len = ICE_FLOW_FLD_SZ_IPV6_ADDR /
|
|
ICE_FLOW_FV_EXTRACT_SZ;
|
|
if (prof->fv[i].offset ==
|
|
ICE_FLOW_FIELD_IPV6_SRC_OFFSET &&
|
|
prof->fv[i + len].proto_id == proto_id &&
|
|
prof->fv[i + len].offset ==
|
|
ICE_FLOW_FIELD_IPV6_DST_OFFSET) {
|
|
src_idx = i;
|
|
dst_idx = i + len;
|
|
i += 2 * len;
|
|
break;
|
|
}
|
|
i++;
|
|
continue;
|
|
case ICE_PROT_TCP_IL:
|
|
case ICE_PROT_UDP_IL_OR_S:
|
|
case ICE_PROT_SCTP_IL:
|
|
len = ICE_FLOW_FLD_SZ_PORT /
|
|
ICE_FLOW_FV_EXTRACT_SZ;
|
|
if (prof->fv[i].offset ==
|
|
ICE_FLOW_FIELD_SRC_PORT_OFFSET &&
|
|
prof->fv[i + len].proto_id == proto_id &&
|
|
prof->fv[i + len].offset ==
|
|
ICE_FLOW_FIELD_DST_PORT_OFFSET) {
|
|
src_idx = i;
|
|
dst_idx = i + len;
|
|
i += 2 * len;
|
|
break;
|
|
}
|
|
i++;
|
|
continue;
|
|
default:
|
|
i++;
|
|
continue;
|
|
}
|
|
ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len);
|
|
}
|
|
}
|
|
|
|
/* Max registers index per packet profile */
|
|
#define ICE_SYMM_REG_INDEX_MAX 6
|
|
|
|
/**
|
|
* ice_rss_update_raw_symm - update symmetric hash configuration
|
|
* for raw pattern
|
|
* @hw: pointer to the hardware structure
|
|
* @cfg: configure parameters for raw pattern
|
|
* @id: profile tracking ID
|
|
*
|
|
* Update symmetric hash configuration for raw pattern if required.
|
|
* Otherwise only clear to default.
|
|
*/
|
|
void
|
|
ice_rss_update_raw_symm(struct ice_hw *hw,
|
|
struct ice_rss_raw_cfg *cfg, u64 id)
|
|
{
|
|
struct ice_prof_map *map;
|
|
u8 prof_id, m;
|
|
|
|
mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
|
|
map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
|
|
if (map)
|
|
prof_id = map->prof_id;
|
|
mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
|
|
if (!map)
|
|
return;
|
|
/* clear to default */
|
|
for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++)
|
|
wr32(hw, GLQF_HSYMM(prof_id, m), 0);
|
|
if (cfg->symm)
|
|
ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id);
|
|
}
|
|
|
|
/**
|
|
* ice_add_rss_cfg_sync - add an RSS configuration
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
* @cfg: configure parameters
|
|
*
|
|
* Assumption: lock has already been acquired for RSS list
|
|
*/
|
|
static int
|
|
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
|
|
const struct ice_rss_hash_cfg *cfg)
|
|
{
|
|
const enum ice_block blk = ICE_BLK_RSS;
|
|
struct ice_flow_prof *prof = NULL;
|
|
struct ice_flow_seg_info *segs;
|
|
u8 segs_cnt;
|
|
int status;
|
|
|
|
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
|
|
ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
|
|
|
|
segs = devm_kcalloc(ice_hw_to_dev(hw), segs_cnt, sizeof(*segs),
|
|
GFP_KERNEL);
|
|
if (!segs)
|
|
return -ENOMEM;
|
|
|
|
/* Construct the packet segment info from the hashed fields */
|
|
status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
|
|
if (status)
|
|
goto exit;
|
|
|
|
/* Search for a flow profile that has matching headers, hash fields
|
|
* and has the input VSI associated to it. If found, no further
|
|
* operations required and exit.
|
|
*/
|
|
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
|
|
vsi_handle,
|
|
ICE_FLOW_FIND_PROF_CHK_FLDS |
|
|
ICE_FLOW_FIND_PROF_CHK_VSI);
|
|
if (prof) {
|
|
if (prof->cfg.symm == cfg->symm)
|
|
goto exit;
|
|
prof->cfg.symm = cfg->symm;
|
|
goto update_symm;
|
|
}
|
|
|
|
/* Check if a flow profile exists with the same protocol headers and
|
|
* associated with the input VSI. If so disassociate the VSI from
|
|
* this profile. The VSI will be added to a new profile created with
|
|
* the protocol header and new hash field configuration.
|
|
*/
|
|
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
|
|
vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
|
|
if (prof) {
|
|
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
|
|
if (!status)
|
|
ice_rem_rss_list(hw, vsi_handle, prof);
|
|
else
|
|
goto exit;
|
|
|
|
/* Remove profile if it has no VSIs associated */
|
|
if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
|
|
status = ice_flow_rem_prof(hw, blk, prof->id);
|
|
if (status)
|
|
goto exit;
|
|
}
|
|
}
|
|
|
|
/* Search for a profile that has same match fields only. If this
|
|
* exists then associate the VSI to this profile.
|
|
*/
|
|
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
|
|
vsi_handle,
|
|
ICE_FLOW_FIND_PROF_CHK_FLDS);
|
|
if (prof) {
|
|
if (prof->cfg.symm == cfg->symm) {
|
|
status = ice_flow_assoc_prof(hw, blk, prof,
|
|
vsi_handle);
|
|
if (!status)
|
|
status = ice_add_rss_list(hw, vsi_handle,
|
|
prof);
|
|
} else {
|
|
/* if a profile exist but with different symmetric
|
|
* requirement, just return error.
|
|
*/
|
|
status = -EOPNOTSUPP;
|
|
}
|
|
goto exit;
|
|
}
|
|
|
|
/* Create a new flow profile with generated profile and packet
|
|
* segment information.
|
|
*/
|
|
status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
|
|
ICE_FLOW_GEN_PROFID(cfg->hash_flds,
|
|
segs[segs_cnt - 1].hdrs,
|
|
cfg->hdr_type),
|
|
segs, segs_cnt, NULL, 0, &prof);
|
|
if (status)
|
|
goto exit;
|
|
|
|
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
|
|
/* If association to a new flow profile failed then this profile can
|
|
* be removed.
|
|
*/
|
|
if (status) {
|
|
ice_flow_rem_prof(hw, blk, prof->id);
|
|
goto exit;
|
|
}
|
|
|
|
status = ice_add_rss_list(hw, vsi_handle, prof);
|
|
|
|
prof->cfg.symm = cfg->symm;
|
|
update_symm:
|
|
ice_rss_update_symm(hw, prof);
|
|
|
|
exit:
|
|
devm_kfree(ice_hw_to_dev(hw), segs);
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_add_rss_cfg - add an RSS configuration with specified hashed fields
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
* @cfg: configure parameters
|
|
*
|
|
* This function will generate a flow profile based on fields associated with
|
|
* the input fields to hash on, the flow type and use the VSI number to add
|
|
* a flow entry to the profile.
|
|
*/
|
|
int
|
|
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
|
|
const struct ice_rss_hash_cfg *cfg)
|
|
{
|
|
struct ice_rss_hash_cfg local_cfg;
|
|
int status;
|
|
|
|
if (!ice_is_vsi_valid(hw, vsi_handle) ||
|
|
!cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
|
|
cfg->hash_flds == ICE_HASH_INVALID)
|
|
return -EINVAL;
|
|
|
|
local_cfg = *cfg;
|
|
if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
|
|
mutex_lock(&hw->rss_locks);
|
|
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
|
|
mutex_unlock(&hw->rss_locks);
|
|
} else {
|
|
mutex_lock(&hw->rss_locks);
|
|
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
|
|
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
|
|
if (!status) {
|
|
local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
|
|
status = ice_add_rss_cfg_sync(hw, vsi_handle,
|
|
&local_cfg);
|
|
}
|
|
mutex_unlock(&hw->rss_locks);
|
|
}
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_rem_rss_cfg_sync - remove an existing RSS configuration
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
* @cfg: configure parameters
|
|
*
|
|
* Assumption: lock has already been acquired for RSS list
|
|
*/
|
|
static int
|
|
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
|
|
const struct ice_rss_hash_cfg *cfg)
|
|
{
|
|
const enum ice_block blk = ICE_BLK_RSS;
|
|
struct ice_flow_seg_info *segs;
|
|
struct ice_flow_prof *prof;
|
|
u8 segs_cnt;
|
|
int status;
|
|
|
|
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
|
|
ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
|
|
segs = devm_kcalloc(ice_hw_to_dev(hw), segs_cnt, sizeof(*segs),
|
|
GFP_KERNEL);
|
|
if (!segs)
|
|
return -ENOMEM;
|
|
|
|
/* Construct the packet segment info from the hashed fields */
|
|
status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
|
|
if (status)
|
|
goto out;
|
|
|
|
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
|
|
vsi_handle,
|
|
ICE_FLOW_FIND_PROF_CHK_FLDS);
|
|
if (!prof) {
|
|
status = -ENOENT;
|
|
goto out;
|
|
}
|
|
|
|
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
|
|
if (status)
|
|
goto out;
|
|
|
|
/* Remove RSS configuration from VSI context before deleting
|
|
* the flow profile.
|
|
*/
|
|
ice_rem_rss_list(hw, vsi_handle, prof);
|
|
|
|
if (bitmap_empty(prof->vsis, ICE_MAX_VSI))
|
|
status = ice_flow_rem_prof(hw, blk, prof->id);
|
|
|
|
out:
|
|
devm_kfree(ice_hw_to_dev(hw), segs);
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
* @cfg: configure parameters
|
|
*
|
|
* This function will lookup the flow profile based on the input
|
|
* hash field bitmap, iterate through the profile entry list of
|
|
* that profile and find entry associated with input VSI to be
|
|
* removed. Calls are made to underlying flow apis which will in
|
|
* turn build or update buffers for RSS XLT1 section.
|
|
*/
|
|
int
|
|
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
|
|
const struct ice_rss_hash_cfg *cfg)
|
|
{
|
|
struct ice_rss_hash_cfg local_cfg;
|
|
int status;
|
|
|
|
if (!ice_is_vsi_valid(hw, vsi_handle) ||
|
|
!cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
|
|
cfg->hash_flds == ICE_HASH_INVALID)
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&hw->rss_locks);
|
|
local_cfg = *cfg;
|
|
if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
|
|
status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
|
|
} else {
|
|
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
|
|
status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
|
|
|
|
if (!status) {
|
|
local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
|
|
status = ice_rem_rss_cfg_sync(hw, vsi_handle,
|
|
&local_cfg);
|
|
}
|
|
}
|
|
mutex_unlock(&hw->rss_locks);
|
|
|
|
return status;
|
|
}
|
|
|
|
/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
|
|
* As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
|
|
* convert its values to their appropriate flow L3, L4 values.
|
|
*/
|
|
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
|
|
(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
|
|
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
|
|
(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
|
|
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
|
|
(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
|
|
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
|
|
(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
|
|
ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
|
|
|
|
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
|
|
(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
|
|
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
|
|
(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
|
|
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
|
|
(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
|
|
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
|
|
(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
|
|
ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
|
|
|
|
/**
|
|
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
* @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
|
|
*
|
|
* This function will take the hash bitmap provided by the AVF driver via a
|
|
* message, convert it to ICE-compatible values, and configure RSS flow
|
|
* profiles.
|
|
*/
|
|
int
|
|
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
|
|
{
|
|
struct ice_rss_hash_cfg hcfg;
|
|
int status = 0;
|
|
u64 hash_flds;
|
|
|
|
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
|
|
!ice_is_vsi_valid(hw, vsi_handle))
|
|
return -EINVAL;
|
|
|
|
/* Make sure no unsupported bits are specified */
|
|
if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
|
|
ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
|
|
return -EIO;
|
|
|
|
hash_flds = avf_hash;
|
|
|
|
/* Always create an L3 RSS configuration for any L4 RSS configuration */
|
|
if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
|
|
hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
|
|
|
|
if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
|
|
hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
|
|
|
|
/* Create the corresponding RSS configuration for each valid hash bit */
|
|
while (hash_flds) {
|
|
u64 rss_hash = ICE_HASH_INVALID;
|
|
|
|
if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
|
|
if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
|
|
rss_hash = ICE_FLOW_HASH_IPV4;
|
|
hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
|
|
} else if (hash_flds &
|
|
ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
|
|
rss_hash = ICE_FLOW_HASH_IPV4 |
|
|
ICE_FLOW_HASH_TCP_PORT;
|
|
hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
|
|
} else if (hash_flds &
|
|
ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
|
|
rss_hash = ICE_FLOW_HASH_IPV4 |
|
|
ICE_FLOW_HASH_UDP_PORT;
|
|
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
|
|
} else if (hash_flds &
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
|
|
rss_hash = ICE_FLOW_HASH_IPV4 |
|
|
ICE_FLOW_HASH_SCTP_PORT;
|
|
hash_flds &=
|
|
~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
|
|
}
|
|
} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
|
|
if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
|
|
rss_hash = ICE_FLOW_HASH_IPV6;
|
|
hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
|
|
} else if (hash_flds &
|
|
ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
|
|
rss_hash = ICE_FLOW_HASH_IPV6 |
|
|
ICE_FLOW_HASH_TCP_PORT;
|
|
hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
|
|
} else if (hash_flds &
|
|
ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
|
|
rss_hash = ICE_FLOW_HASH_IPV6 |
|
|
ICE_FLOW_HASH_UDP_PORT;
|
|
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
|
|
} else if (hash_flds &
|
|
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
|
|
rss_hash = ICE_FLOW_HASH_IPV6 |
|
|
ICE_FLOW_HASH_SCTP_PORT;
|
|
hash_flds &=
|
|
~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
|
|
}
|
|
}
|
|
|
|
if (rss_hash == ICE_HASH_INVALID)
|
|
return -EIO;
|
|
|
|
hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
|
|
hcfg.hash_flds = rss_hash;
|
|
hcfg.symm = false;
|
|
hcfg.hdr_type = ICE_RSS_ANY_HEADERS;
|
|
status = ice_add_rss_cfg(hw, vsi_handle, &hcfg);
|
|
if (status)
|
|
break;
|
|
}
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_replay_rss_cfg - replay RSS configurations associated with VSI
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
*/
|
|
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
|
|
{
|
|
struct ice_rss_cfg *r;
|
|
int status = 0;
|
|
|
|
if (!ice_is_vsi_valid(hw, vsi_handle))
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&hw->rss_locks);
|
|
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
|
|
if (test_bit(vsi_handle, r->vsis)) {
|
|
status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
|
|
if (status)
|
|
break;
|
|
}
|
|
}
|
|
mutex_unlock(&hw->rss_locks);
|
|
|
|
return status;
|
|
}
|
|
|
|
/**
|
|
* ice_get_rss_cfg - returns hashed fields for the given header types
|
|
* @hw: pointer to the hardware structure
|
|
* @vsi_handle: software VSI handle
|
|
* @hdrs: protocol header type
|
|
*
|
|
* This function will return the match fields of the first instance of flow
|
|
* profile having the given header types and containing input VSI
|
|
*/
|
|
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
|
|
{
|
|
u64 rss_hash = ICE_HASH_INVALID;
|
|
struct ice_rss_cfg *r;
|
|
|
|
/* verify if the protocol header is non zero and VSI is valid */
|
|
if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
|
|
return ICE_HASH_INVALID;
|
|
|
|
mutex_lock(&hw->rss_locks);
|
|
list_for_each_entry(r, &hw->rss_list_head, l_entry)
|
|
if (test_bit(vsi_handle, r->vsis) &&
|
|
r->hash.addl_hdrs == hdrs) {
|
|
rss_hash = r->hash.hash_flds;
|
|
break;
|
|
}
|
|
mutex_unlock(&hw->rss_locks);
|
|
|
|
return rss_hash;
|
|
}
|