2018-05-31 16:07:43 +08:00
|
|
|
/*
 * RSS and Classifier helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
|
|
|
|
|
|
|
|
#include "mvpp2.h"
|
|
|
|
#include "mvpp2_cls.h"
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
#include "mvpp2_prs.h"
|
2018-05-31 16:07:43 +08:00
|
|
|
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
/* Build a struct mvpp2_cls_flow initializer for the cls_flows table.
 *
 * _type:    the ethtool flow type this entry belongs to (TCP_V4_FLOW, ...)
 * _id:      the MVPP2_FL_* flow id programmed into the Header Parser
 * _opts:    the hash fields (HEK options) this flow can be hashed on
 * _ri:      the parser "results info" bits a packet must carry to match
 * _ri_mask: which results-info bits are significant for the match
 */
#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask)	\
{								\
	.flow_type = _type,					\
	.flow_id = _id,						\
	.supported_hash_opts = _opts,				\
	.prs_ri = {						\
		.ri = _ri,					\
		.ri_mask = _ri_mask				\
	}							\
}
|
|
|
|
|
|
|
|
static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
|
|
|
|
/* TCP over IPv4 flows, Not fragmented, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
/* TCP over IPv4 flows, Not fragmented, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
/* TCP over IPv4 flows, fragmented, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
/* TCP over IPv4 flows, fragmented, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
/* UDP over IPv4 flows, Not fragmented, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
/* UDP over IPv4 flows, fragmented, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
/* UDP over IPv4 flows, fragmented, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
/* TCP over IPv6 flows, not fragmented, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
/* TCP over IPv6 flows, not fragmented, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
/* TCP over IPv6 flows, fragmented, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
|
|
|
|
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
|
|
|
|
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
/* TCP over IPv6 flows, fragmented, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
|
|
|
|
MVPP2_PRS_RI_L4_TCP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
/* UDP over IPv6 flows, not fragmented, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_5T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
/* UDP over IPv6 flows, not fragmented, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
/* UDP over IPv6 flows, fragmented, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
|
|
|
|
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
|
|
|
|
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
|
|
|
|
/* UDP over IPv6 flows, fragmented, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
|
|
|
|
MVPP2_PRS_RI_L4_UDP,
|
|
|
|
MVPP2_PRS_IP_MASK),
|
|
|
|
|
|
|
|
/* IPv4 flows, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
|
|
|
|
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
|
|
|
|
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
|
|
|
|
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
|
|
|
|
/* IPv4 flows, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4,
|
|
|
|
MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OPT,
|
|
|
|
MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP4_OTHER,
|
|
|
|
MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
|
|
|
|
/* IPv6 flows, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
|
|
|
|
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
|
|
|
|
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
|
|
|
|
/* IPv6 flows, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6,
|
|
|
|
MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
|
|
|
|
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
MVPP2_PRS_RI_L3_IP6,
|
|
|
|
MVPP2_PRS_RI_L3_PROTO_MASK),
|
|
|
|
|
|
|
|
/* Non IP flow, no vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
|
|
|
|
0,
|
|
|
|
MVPP2_PRS_RI_VLAN_NONE,
|
|
|
|
MVPP2_PRS_RI_VLAN_MASK),
|
|
|
|
/* Non IP flow, with vlan tag */
|
|
|
|
MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
|
|
|
|
MVPP22_CLS_HEK_OPT_VLAN,
|
|
|
|
0, 0),
|
|
|
|
};
|
|
|
|
|
2018-07-12 19:54:26 +08:00
|
|
|
/* Read a flow table entry from hardware into @fe.
 *
 * The flow table is accessed indirectly: the entry index is written to the
 * index register first, then the three data words are read back from the
 * TBL0..TBL2 data registers. The write must precede the reads.
 */
static void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
				struct mvpp2_cls_flow_entry *fe)
{
	fe->index = index;
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
	fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
	fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
	fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
}
|
|
|
|
|
2018-05-31 16:07:43 +08:00
|
|
|
/* Write @fe back to the hardware flow table.
 *
 * Mirror of mvpp2_cls_flow_read(): select the entry through the index
 * register, then write the three data words. The index write must come
 * first for the data writes to land on the right entry.
 */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
|
|
|
|
|
|
|
|
/* Write lookup (decoding) table entry @le to hardware.
 *
 * The lookup table is also accessed indirectly: the index register takes
 * the (way, lkpid) pair packed into one word, then the entry data is
 * written to the table register.
 */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	/* Pack way and lookup id into the index register layout */
	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
|
|
|
|
|
2018-07-12 19:54:26 +08:00
|
|
|
/* Operations on flow entry */
|
|
|
|
static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
|
|
|
|
{
|
|
|
|
return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
int num_of_fields)
|
|
|
|
{
|
|
|
|
fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
|
|
|
|
fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
int field_index)
|
|
|
|
{
|
|
|
|
return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
|
|
|
|
MVPP2_CLS_FLOW_TBL2_FLD_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
int field_index, int field_id)
|
|
|
|
{
|
|
|
|
fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
|
|
|
|
MVPP2_CLS_FLOW_TBL2_FLD_MASK);
|
|
|
|
fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
|
|
|
|
}
|
|
|
|
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
int engine)
|
|
|
|
{
|
|
|
|
fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
|
|
|
|
fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
bool from_packet)
|
|
|
|
{
|
|
|
|
if (from_packet)
|
|
|
|
fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
|
|
|
|
else
|
|
|
|
fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set the sequence attribute of the flow table entry */
static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
{
	u32 val = fe->data[1];

	val &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
	val |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);

	fe->data[1] = val;
}
|
|
|
|
|
|
|
|
static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
bool is_last)
|
|
|
|
{
|
|
|
|
fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
|
|
|
|
fe->data[0] |= !!is_last;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
|
|
|
|
{
|
|
|
|
fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
|
|
|
|
fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
u32 port)
|
|
|
|
{
|
|
|
|
fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
|
|
|
|
}
|
|
|
|
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
/* Initialize the parser entry for the given flow.
 *
 * Programs the Header Parser so that packets whose results-info matches
 * flow->prs_ri (under ri_mask) are tagged with flow->flow_id, which the
 * decoding table then uses to pick the classification sequence.
 */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
				    struct mvpp2_cls_flow *flow)
{
	mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
			   flow->prs_ri.ri_mask);
}
|
|
|
|
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
/* Initialize the Lookup Id table entry for the given flow */
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
|
|
|
|
struct mvpp2_cls_flow *flow)
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
{
|
|
|
|
struct mvpp2_cls_lookup_entry le;
|
|
|
|
|
|
|
|
le.way = 0;
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
le.lkpid = flow->flow_id;
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
|
|
|
|
/* The default RxQ for this port is set in the C2 lookup */
|
|
|
|
le.data = 0;
|
|
|
|
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
/* We point on the first lookup in the sequence for the flow, that is
|
|
|
|
* the C2 lookup.
|
|
|
|
*/
|
|
|
|
le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
|
|
|
|
|
|
|
|
/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
|
|
|
|
|
|
|
|
mvpp2_cls_lookup_write(priv, &le);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize the flow table entries for the given flow */
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
{
|
|
|
|
struct mvpp2_cls_flow_entry fe;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* C2 lookup */
|
|
|
|
memset(&fe, 0, sizeof(fe));
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
|
|
|
|
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
|
|
|
|
mvpp2_cls_flow_port_id_sel(&fe, true);
|
2018-07-12 19:54:26 +08:00
|
|
|
mvpp2_cls_flow_last_set(&fe, 0);
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
mvpp2_cls_flow_pri_set(&fe, 0);
|
2018-07-12 19:54:26 +08:00
|
|
|
mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
|
|
|
|
/* Add all ports */
|
|
|
|
for (i = 0; i < MVPP2_MAX_PORTS; i++)
|
|
|
|
mvpp2_cls_flow_port_add(&fe, BIT(i));
|
|
|
|
|
|
|
|
mvpp2_cls_flow_write(priv, &fe);
|
2018-07-12 19:54:26 +08:00
|
|
|
|
|
|
|
/* C3Hx lookups */
|
|
|
|
for (i = 0; i < MVPP2_MAX_PORTS; i++) {
|
|
|
|
memset(&fe, 0, sizeof(fe));
|
|
|
|
fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
|
|
|
|
|
|
|
|
mvpp2_cls_flow_port_id_sel(&fe, true);
|
|
|
|
mvpp2_cls_flow_pri_set(&fe, i + 1);
|
|
|
|
mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
|
|
|
|
mvpp2_cls_flow_port_add(&fe, BIT(i));
|
|
|
|
|
|
|
|
mvpp2_cls_flow_write(priv, &fe);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update the last entry */
|
|
|
|
mvpp2_cls_flow_last_set(&fe, 1);
|
|
|
|
mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
|
|
|
|
|
|
|
|
mvpp2_cls_flow_write(priv, &fe);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Adds a field to the Header Extracted Key generation parameters*/
|
|
|
|
static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
u32 field_id)
|
|
|
|
{
|
|
|
|
int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
|
|
|
|
|
|
|
|
if (nb_fields == MVPP2_FLOW_N_FIELDS)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
|
|
|
|
|
|
|
|
mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
|
|
|
|
unsigned long hash_opts)
|
|
|
|
{
|
|
|
|
u32 field_id;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Clear old fields */
|
|
|
|
mvpp2_cls_flow_hek_num_set(fe, 0);
|
|
|
|
fe->data[2] = 0;
|
|
|
|
|
|
|
|
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
|
|
|
|
switch (BIT(i)) {
|
|
|
|
case MVPP22_CLS_HEK_OPT_VLAN:
|
|
|
|
field_id = MVPP22_CLS_FIELD_VLAN;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_HEK_OPT_IP4SA:
|
|
|
|
field_id = MVPP22_CLS_FIELD_IP4SA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_HEK_OPT_IP4DA:
|
|
|
|
field_id = MVPP22_CLS_FIELD_IP4DA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_HEK_OPT_IP6SA:
|
|
|
|
field_id = MVPP22_CLS_FIELD_IP6SA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_HEK_OPT_IP6DA:
|
|
|
|
field_id = MVPP22_CLS_FIELD_IP6DA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_HEK_OPT_L4SIP:
|
|
|
|
field_id = MVPP22_CLS_FIELD_L4SIP;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_HEK_OPT_L4DIP:
|
|
|
|
field_id = MVPP22_CLS_FIELD_L4DIP;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
if (mvpp2_flow_add_hek_field(fe, field_id))
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
}
|
|
|
|
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, which are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
|
|
|
|
{
|
|
|
|
if (flow >= MVPP2_N_FLOWS)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return &cls_flows[flow];
|
|
|
|
}
|
|
|
|
|
2018-07-12 19:54:26 +08:00
|
|
|
/* Set the hash generation options for the given traffic flow.
|
|
|
|
* One traffic flow (in the ethtool sense) has multiple classification flows,
|
|
|
|
* to handle specific cases such as fragmentation, or the presence of a
|
|
|
|
* VLAN / DSA Tag.
|
|
|
|
*
|
|
|
|
* Each of these individual flows has different constraints, for example we
|
|
|
|
* can't hash fragmented packets on L4 data (else we would risk having packet
|
|
|
|
* re-ordering), so each classification flows masks the options with their
|
|
|
|
* supported ones.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
|
|
|
|
u16 requested_opts)
|
|
|
|
{
|
|
|
|
struct mvpp2_cls_flow_entry fe;
|
|
|
|
struct mvpp2_cls_flow *flow;
|
|
|
|
int i, engine, flow_index;
|
|
|
|
u16 hash_opts;
|
|
|
|
|
|
|
|
for (i = 0; i < MVPP2_N_FLOWS; i++) {
|
|
|
|
flow = mvpp2_cls_flow_get(i);
|
|
|
|
if (!flow)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (flow->flow_type != flow_type)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
|
|
|
|
flow->flow_id);
|
|
|
|
|
|
|
|
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
|
|
|
|
|
|
|
|
hash_opts = flow->supported_hash_opts & requested_opts;
|
|
|
|
|
|
|
|
/* Use C3HB engine to access L4 infos. This adds L4 infos to the
|
|
|
|
* hash parameters
|
|
|
|
*/
|
|
|
|
if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
|
|
|
|
engine = MVPP22_CLS_ENGINE_C3HB;
|
|
|
|
else
|
|
|
|
engine = MVPP22_CLS_ENGINE_C3HA;
|
|
|
|
|
|
|
|
if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mvpp2_cls_flow_eng_set(&fe, engine);
|
|
|
|
|
|
|
|
mvpp2_cls_flow_write(port->priv, &fe);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
|
|
|
|
{
|
|
|
|
u16 hash_opts = 0;
|
|
|
|
int n_fields, i, field;
|
|
|
|
|
|
|
|
n_fields = mvpp2_cls_flow_hek_num_get(fe);
|
|
|
|
|
|
|
|
for (i = 0; i < n_fields; i++) {
|
|
|
|
field = mvpp2_cls_flow_hek_get(fe, i);
|
|
|
|
|
|
|
|
switch (field) {
|
|
|
|
case MVPP22_CLS_FIELD_MAC_DA:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_FIELD_VLAN:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_FIELD_L3_PROTO:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_FIELD_IP4SA:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_FIELD_IP4DA:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_FIELD_IP6SA:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_FIELD_IP6DA:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_FIELD_L4SIP:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
|
|
|
|
break;
|
|
|
|
case MVPP22_CLS_FIELD_L4DIP:
|
|
|
|
hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return hash_opts;
|
|
|
|
}
|
|
|
|
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
|
|
|
|
{
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
struct mvpp2_cls_flow *flow;
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
int i;
|
|
|
|
|
net: mvpp2: split ingress traffic into multiple flows
The PPv2 classifier allows to perform classification operations on each
ingress packet, based on the flow the packet is assigned to.
The current code uses only 1 flow per port, and the only classification
action consists of assigning the rx queue to the packet, depending on the
port.
In preparation for adding RSS support, we have to split all incoming
traffic into different flows. Since RSS assigns a rx queue depending on
the hash of some header fields, we have to make sure that the hash is
generated in a consistent way for all packets in the same flow.
What we call a "flow" is actually a set of attributes attached to a
packet that depends on various L2/L3/L4 info.
This patch introduces 52 flows, wich are a combination of various L2, L3
and L4 attributes :
- Whether or not the packet has a VLAN tag
- Whether the packet is IPv4, IPv6 or something else
- Whether the packet is TCP, UDP or something else
- Whether or not the packet is fragmented at L3 level.
The flow is associated to a packet by the Header Parser. Each flow
corresponds to an entry in the decoding table. This entry then points to
the sequence of classification lookups to be performed by the
classifier, represented in the flow table.
For now, the only lookup we perform is a C2 lookup to set the default
rx queue.
Header parser Dec table
Ingress pkt +-------------+ flow id +----------------------------+
------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag |
+-------------+ |TCP IPv4 w/o VLAN, not frag |
|TCP IPv4 w/ VLAN, frag |--+
|etc. | |
+----------------------------+ |
|
Flow table |
+------------+ +---------------------+ |
To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+
+------------+ | flow 1: C2 lookup |
| | ... |
+------------+ | flow 51 : C2 lookup |
| C2 engine | +---------------------+
+------------+
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:25 +08:00
|
|
|
for (i = 0; i < MVPP2_N_FLOWS; i++) {
|
|
|
|
flow = mvpp2_cls_flow_get(i);
|
|
|
|
if (!flow)
|
|
|
|
break;
|
|
|
|
|
|
|
|
mvpp2_cls_flow_prs_init(priv, flow);
|
|
|
|
mvpp2_cls_flow_lkp_init(priv, flow);
|
|
|
|
mvpp2_cls_flow_init(priv, flow);
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write one C2 engine entry (TCAM data, action word and attribute words)
 * to the hardware, at the index stored in @c2.
 */
static void mvpp2_cls_c2_write(struct mvpp2 *priv,
			       struct mvpp2_cls_c2_entry *c2)
{
	/* Select the C2 TCAM entry; all data/act/attr register accesses
	 * below target this index, so it must be written first.
	 */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);

	/* Write TCAM */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);

	/* Action word: what the engine does when this entry matches */
	mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);

	/* Attribute words: the values applied by the actions (e.g. the
	 * target rx queue)
	 */
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
}
|
|
|
|
|
2018-07-12 19:54:26 +08:00
|
|
|
/* Read back one C2 engine entry (TCAM data, action word and attribute
 * words) from the hardware at @index, filling @c2.
 */
static void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
			      struct mvpp2_cls_c2_entry *c2)
{
	/* Select the C2 TCAM entry before reading its registers */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);

	c2->index = index;

	c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
	c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
	c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
	c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
	c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);

	c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);

	c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
	c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
	c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
	c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
}
|
|
|
|
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
|
|
|
|
{
|
|
|
|
struct mvpp2_cls_c2_entry c2;
|
|
|
|
u8 qh, ql, pmap;
|
|
|
|
|
|
|
|
memset(&c2, 0, sizeof(c2));
|
|
|
|
|
|
|
|
c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
|
|
|
|
|
|
|
|
pmap = BIT(port->id);
|
|
|
|
c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
|
|
|
|
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
|
|
|
|
|
|
|
|
/* Update RSS status after matching this entry */
|
|
|
|
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
|
|
|
|
|
|
|
|
/* Mark packet as "forwarded to software", needed for RSS */
|
|
|
|
c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
|
|
|
|
|
|
|
|
/* Configure the default rx queue : Update Queue Low and Queue High, but
|
|
|
|
* don't lock, since the rx queue selection might be overridden by RSS
|
|
|
|
*/
|
|
|
|
c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
|
|
|
|
MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
|
|
|
|
|
|
|
|
qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
|
|
|
|
ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
|
|
|
|
|
|
|
|
c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
|
|
|
|
MVPP22_CLS_C2_ATTR0_QLOW(ql);
|
|
|
|
|
|
|
|
mvpp2_cls_c2_write(port->priv, &c2);
|
|
|
|
}
|
|
|
|
|
2018-05-31 16:07:43 +08:00
|
|
|
/* Classifier default initialization */
|
|
|
|
void mvpp2_cls_init(struct mvpp2 *priv)
|
|
|
|
{
|
|
|
|
struct mvpp2_cls_lookup_entry le;
|
|
|
|
struct mvpp2_cls_flow_entry fe;
|
|
|
|
int index;
|
|
|
|
|
|
|
|
/* Enable classifier */
|
|
|
|
mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
|
|
|
|
|
|
|
|
/* Clear classifier flow table */
|
|
|
|
memset(&fe.data, 0, sizeof(fe.data));
|
|
|
|
for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
|
|
|
|
fe.index = index;
|
|
|
|
mvpp2_cls_flow_write(priv, &fe);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clear classifier lookup table */
|
|
|
|
le.data = 0;
|
|
|
|
for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
|
|
|
|
le.lkpid = index;
|
|
|
|
le.way = 0;
|
|
|
|
mvpp2_cls_lookup_write(priv, &le);
|
|
|
|
|
|
|
|
le.way = 1;
|
|
|
|
mvpp2_cls_lookup_write(priv, &le);
|
|
|
|
}
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
|
|
|
|
mvpp2_cls_port_init_flows(priv);
|
2018-05-31 16:07:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void mvpp2_cls_port_config(struct mvpp2_port *port)
|
|
|
|
{
|
|
|
|
struct mvpp2_cls_lookup_entry le;
|
|
|
|
u32 val;
|
|
|
|
|
|
|
|
/* Set way for the port */
|
|
|
|
val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
|
|
|
|
val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
|
|
|
|
mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
|
|
|
|
|
|
|
|
/* Pick the entry to be accessed in lookup ID decoding table
|
|
|
|
* according to the way and lkpid.
|
|
|
|
*/
|
|
|
|
le.lkpid = port->id;
|
|
|
|
le.way = 0;
|
|
|
|
le.data = 0;
|
|
|
|
|
|
|
|
/* Set initial CPU queue for receiving packets */
|
|
|
|
le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
|
|
|
|
le.data |= port->first_rxq;
|
|
|
|
|
|
|
|
/* Disable classification engines */
|
|
|
|
le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
|
|
|
|
|
|
|
|
/* Update lookup ID table entry */
|
|
|
|
mvpp2_cls_lookup_write(port->priv, &le);
|
net: mvpp2: use classifier to assign default rx queue
The PPv2 Controller has a classifier, that can perform multiple lookup
operations for each packet, using different engines.
One of these engines is the C2 engine, which performs TCAM based lookups
on data extracted from the packet header. When a packet matches an
entry, the engine sets various attributes, used to perform
classification operations.
One of these attributes is the rx queue in which the packet should be sent.
The current code uses the lookup_id table (also called decoding table)
to assign the rx queue. However, this only works if we use one entry per
port in the decoding table, which won't be the case once we add RSS
lookups.
This patch uses the C2 engine to assign the rx queue to each packet.
The C2 engine is used through the flow table, which dictates what
classification operations are done for a given flow.
Right now, we have one flow per port, which contains every ingress
packet for this port.
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-12 19:54:24 +08:00
|
|
|
|
|
|
|
mvpp2_port_c2_cls_init(port);
|
2018-05-31 16:07:43 +08:00
|
|
|
}
|
|
|
|
|
2018-07-12 19:54:26 +08:00
|
|
|
static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
|
|
|
|
{
|
|
|
|
struct mvpp2_cls_c2_entry c2;
|
|
|
|
|
|
|
|
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
|
|
|
|
|
|
|
|
c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
|
|
|
|
|
|
|
|
mvpp2_cls_c2_write(port->priv, &c2);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
|
|
|
|
{
|
|
|
|
struct mvpp2_cls_c2_entry c2;
|
|
|
|
|
|
|
|
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
|
|
|
|
|
|
|
|
c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
|
|
|
|
|
|
|
|
mvpp2_cls_c2_write(port->priv, &c2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable RSS for @port, via the port's C2 classifier entry */
void mvpp22_rss_enable(struct mvpp2_port *port)
{
	mvpp2_rss_port_c2_enable(port);
}
|
|
|
|
|
|
|
|
/* Disable RSS for @port, via the port's C2 classifier entry */
void mvpp22_rss_disable(struct mvpp2_port *port)
{
	mvpp2_rss_port_c2_disable(port);
}
|
|
|
|
|
2018-05-31 16:07:43 +08:00
|
|
|
/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	/* The oversize rx queue number is split between two registers:
	 * the low bits here...
	 */
	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	/* ...and the remaining high bits here */
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	/* Set this port's bit in the software-forwarding port control
	 * register (read-modify-write, preserving the other ports' bits).
	 */
	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
|
|
|
|
|
2018-07-12 19:54:21 +08:00
|
|
|
/* Translate a port-relative rx queue number @rxq (from the ethtool RSS
 * indirection table) into the global rx queue number to program in the
 * RSS table, spreading queues across CPUs.
 *
 * Returns the port's default rx queue when the target CPU is offline, or
 * when there are fewer per-port rx queues than possible CPUs.
 */
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
	int nrxqs, cpu, cpus = num_possible_cpus();

	/* Number of RXQs per CPU */
	nrxqs = port->nrxqs / cpus;

	/* If there are more possible CPUs than rx queues, nrxqs is 0 and
	 * the division below would be a division by zero: fall back to the
	 * default rx queue.
	 */
	if (!nrxqs)
		return port->first_rxq;

	/* CPU that will handle this rx queue */
	cpu = rxq / nrxqs;

	if (!cpu_online(cpu))
		return port->first_rxq;

	/* Indirection to better distribute the packets on the CPUs when
	 * configuring the RSS queues.
	 */
	return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
|
|
|
|
|
2018-07-12 19:54:20 +08:00
|
|
|
/* Program every entry of RSS table @table from the port's software
 * indirection table, translating each port-relative queue number into a
 * global rx queue number.
 */
void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
{
	struct mvpp2 *priv = port->priv;
	int entry;

	for (entry = 0; entry < MVPP22_RSS_TABLE_ENTRIES; entry++) {
		/* Select the (table, entry) pair, then write its value */
		mvpp2_write(priv, MVPP22_RSS_INDEX,
			    MVPP22_RSS_INDEX_TABLE(table) |
			    MVPP22_RSS_INDEX_TABLE_ENTRY(entry));
		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
			    mvpp22_rxfh_indir(port, port->indir[entry]));
	}
}
|
|
|
|
|
2018-07-12 19:54:23 +08:00
|
|
|
/* Per-port RSS initialization: configure the port's RSS table, bind the
 * port's default rx queue to it, fill it with a default even spread of
 * the port's rx queues, and set the default per-flow hash parameters.
 *
 * Note: MVPP22_RSS_INDEX selects which table/queue the following data
 * register write targets, so each index write must precede its data write.
 */
void mvpp22_rss_port_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int i;

	/* Set the table width: replace the whole classifier Rx queue number
	 * with the ones configured in RSS table entries.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

	/* The default RxQ is used as a key to select the RSS table to use.
	 * We use one RSS table per port.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX,
		    MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
	mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
		    MVPP22_RSS_TABLE_POINTER(port->id));

	/* Configure the first table to evenly distribute the packets across
	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
	 */
	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
		port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);

	mvpp22_rss_fill_table(port, port->id);

	/* Configure default flows: hash IPv4/IPv6 on the 2-tuple (src/dst
	 * IP), and TCP/UDP over IPv4/IPv6 on the 5-tuple.
	 */
	mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
	mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
	mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
	mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
	mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
	mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}
|