Merge branch 'tc-taprio-offload-for-SJA1105-DSA'
Vladimir Oltean says:

====================
tc-taprio offload for SJA1105 DSA

This is the third attempt to submit the tc-taprio offload model for
inclusion in the networking tree. The sja1105 switch driver will provide
the first implementation of the offload. Only the bare minimum is added:

- The offload model and a DSA pass-through
- The hardware implementation
- The interaction with the netdev queues in the tagger code
- Documentation

What has been removed from previous attempts is support for
PTP-as-clocksource in sja1105, as well as configuring the traffic class
for management traffic. These will be added as soon as the offload model
is settled.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit db539cae12

--- a/Documentation/networking/dsa/sja1105.rst
+++ b/Documentation/networking/dsa/sja1105.rst
@ -146,6 +146,96 @@ enslaves eth0 and eth1 (the DSA master of the switch ports). This is because in
this mode, the switch ports beneath br0 are not capable of regular traffic, and
are only used as a conduit for switchdev operations.

Offloads
========

Time-aware scheduling
---------------------

The switch supports a variation of the enhancements for scheduled traffic
specified in IEEE 802.1Q-2018 (formerly 802.1Qbv). This means it can be used to
ensure deterministic latency for priority traffic that is sent in-band with its
gate-open event in the network schedule.

This capability can be managed through the tc-taprio offload ('flags 2'). The
difference compared to the software implementation of taprio is that the latter
would only be able to shape traffic originated from the CPU, but not
autonomously forwarded flows.

The device has 8 traffic classes, and maps incoming frames to one of them based
on the VLAN PCP bits (if no VLAN is present, the port-based default is used).
As described in the previous sections, depending on the value of
``vlan_filtering``, the EtherType recognized by the switch as being VLAN can
either be the typical 0x8100 or a custom value used internally by the driver
for tagging. Therefore, the switch ignores the VLAN PCP if used in standalone
or bridge mode with ``vlan_filtering=0``, as it will not recognize the 0x8100
EtherType. In these modes, injecting into a particular TX queue can only be
done by the DSA net devices, which populate the PCP field of the tagging header
on egress. With ``vlan_filtering=1``, the behavior is the other way around:
offloaded flows can be steered to TX queues based on the VLAN PCP, but the DSA
net devices are no longer able to do that. To inject frames into a hardware TX
queue with VLAN awareness active, it is necessary to create a VLAN
sub-interface on the DSA master port, and send normal (0x8100) VLAN-tagged
frames towards the switch, with the VLAN PCP bits set appropriately (e.g. via
the ``egress-qos-map`` option of ``ip link add ... type vlan``).

Management traffic (having DMAC 01-80-C2-xx-xx-xx or 01-19-1B-xx-xx-xx) is the
notable exception: the switch always treats it with a fixed priority and
disregards any VLAN PCP bits even if present. The traffic class for management
traffic has a value of 7 (highest priority) at the moment, which is not
configurable in the driver.

Below is an example of configuring a 500 us cyclic schedule on egress port
``swp5``. The traffic class gate for management traffic (7) is open for 100 us,
and the gates for all other traffic classes are open for 400 us::

  #!/bin/bash

  set -e -u -o pipefail

  NSEC_PER_SEC="1000000000"

  gatemask() {
          local tc_list="$1"
          local mask=0

          for tc in ${tc_list}; do
                  mask=$((${mask} | (1 << ${tc})))
          done

          printf "%02x" ${mask}
  }
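  # Worked example of the helper above: gatemask 7 prints "80" (only bit
  # 7 set), and gatemask "0 1 2 3 4 5 6" prints "7f" (bits 0-6 set),
  # matching the sched-entry gate masks used below.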

  if ! systemctl is-active --quiet ptp4l; then
          echo "Please start the ptp4l service"
          exit
  fi

  now=$(phc_ctl /dev/ptp1 get | gawk '/clock time is/ { print $5; }')
  # Phase-align the base time to the start of the next second.
  sec=$(echo "${now}" | gawk -F. '{ print $1; }')
  base_time="$(((${sec} + 1) * ${NSEC_PER_SEC}))"

  tc qdisc add dev swp5 parent root handle 100 taprio \
          num_tc 8 \
          map 0 1 2 3 4 5 6 7 \
          queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
          base-time ${base_time} \
          sched-entry S $(gatemask 7) 100000 \
          sched-entry S $(gatemask "0 1 2 3 4 5 6") 400000 \
          flags 2
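The schedule installed by this command can be inspected afterwards with ``tc
qdisc show dev swp5``, which dumps the gate control list back from the qdisc.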

It is possible to apply the tc-taprio offload on multiple egress ports. There
are hardware restrictions related to the fact that no gate event may trigger
simultaneously on two ports. The driver checks the consistency of the schedules
against this restriction and errors out when appropriate. Schedule analysis is
needed to avoid this, which is outside the scope of the document.

At the moment, the time-aware scheduler can only be triggered based on a
standalone clock and not based on PTP time. This means the base-time argument
from tc-taprio is ignored and the schedule starts right away. It also means it
is more difficult to phase-align the scheduler with the other devices in the
network.

Device Tree bindings and board design
=====================================

--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@ -23,3 +23,11 @@ config NET_DSA_SJA1105_PTP
	help
	  This enables support for timestamping and PTP clock manipulations in
	  the SJA1105 DSA driver.

config NET_DSA_SJA1105_TAS
	bool "Support for the Time-Aware Scheduler on NXP SJA1105"
	depends on NET_DSA_SJA1105
	help
	  This enables support for the TTEthernet-based egress scheduling
	  engine in the SJA1105 DSA driver, which is controlled using a
	  hardware offload of the tc-taprio qdisc.

--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@ -12,3 +12,7 @@ sja1105-objs := \
ifdef CONFIG_NET_DSA_SJA1105_PTP
sja1105-objs += sja1105_ptp.o
endif

ifdef CONFIG_NET_DSA_SJA1105_TAS
sja1105-objs += sja1105_tas.o
endif

--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@ -20,6 +20,8 @@
 */
#define SJA1105_AGEING_TIME_MS(ms)	((ms) / 10)

#include "sja1105_tas.h"

/* Keeps the different addresses between E/T and P/Q/R/S */
struct sja1105_regs {
	u64 device_id;

@ -104,6 +106,7 @@ struct sja1105_private {
	 */
	struct mutex mgmt_lock;
	struct sja1105_tagger_data tagger_data;
	struct sja1105_tas_data tas_data;
};

#include "sja1105_dynamic_config.h"

@ -120,6 +123,9 @@ typedef enum {
	SPI_WRITE = 1,
} sja1105_spi_rw_mode_t;

/* From sja1105_main.c */
int sja1105_static_config_reload(struct sja1105_private *priv);

/* From sja1105_spi.c */
int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
				sja1105_spi_rw_mode_t rw, u64 reg_addr,

--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@ -488,6 +488,8 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,

/* SJA1105E/T: First generation */
struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
	[BLK_IDX_SCHEDULE] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
	[BLK_IDX_L2_LOOKUP] = {
		.entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
		.cmd_packing = sja1105et_l2_lookup_cmd_packing,

@ -529,6 +531,8 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
		.packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD,
		.addr = 0x36,
	},
	[BLK_IDX_SCHEDULE_PARAMS] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
	[BLK_IDX_L2_LOOKUP_PARAMS] = {
		.entry_packing = sja1105et_l2_lookup_params_entry_packing,
		.cmd_packing = sja1105et_l2_lookup_params_cmd_packing,

@ -552,6 +556,8 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {

/* SJA1105P/Q/R/S: Second generation */
struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
	[BLK_IDX_SCHEDULE] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
	[BLK_IDX_L2_LOOKUP] = {
		.entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
		.cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,

@ -593,6 +599,8 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
		.packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
		.addr = 0x4B,
	},
	[BLK_IDX_SCHEDULE_PARAMS] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
	[BLK_IDX_L2_LOOKUP_PARAMS] = {
		.entry_packing = sja1105et_l2_lookup_params_entry_packing,
		.cmd_packing = sja1105et_l2_lookup_params_cmd_packing,

--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@ -22,6 +22,7 @@
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
#include "sja1105_tas.h"

static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)

@ -384,7 +385,9 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
		/* Disallow dynamic changing of the mirror port */
		.mirr_ptacu = 0,
		.switchid = priv->ds->index,
-		/* Priority queue for link-local frames trapped to CPU */
+		/* Priority queue for link-local management frames
+		 * (both ingress to and egress from CPU - PTP, STP etc)
+		 */
		.hostprio = 7,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,

@ -1380,7 +1383,7 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
-static int sja1105_static_config_reload(struct sja1105_private *priv)
+int sja1105_static_config_reload(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry *mac;
	int speed_mbps[SJA1105_NUM_PORTS];

@ -1711,6 +1714,9 @@ static int sja1105_setup(struct dsa_switch *ds)
	 */
	ds->vlan_filtering_is_global = true;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SJA1105_NUM_TC;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.

@ -1722,6 +1728,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;

	sja1105_tas_teardown(ds);
	cancel_work_sync(&priv->tagger_data.rxtstamp_work);
	skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
	sja1105_ptp_clock_unregister(priv);

@ -2051,6 +2058,18 @@ static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
	return true;
}

static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
				 enum tc_setup_type type,
				 void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		return sja1105_setup_tc_taprio(ds, port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
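/* Note: this is the switch-driver end of the ndo_setup_tc pass-through that
 * the net/dsa/slave.c hunk further below adds: DSA forwards any tc offload
 * request it does not handle itself to this ->port_setup_tc() operation.
 */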

static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,

@ -2083,6 +2102,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
	.port_hwtstamp_set	= sja1105_hwtstamp_set,
	.port_rxtstamp		= sja1105_port_rxtstamp,
	.port_txtstamp		= sja1105_port_txtstamp,
	.port_setup_tc		= sja1105_port_setup_tc,
};

static int sja1105_check_device_id(struct sja1105_private *priv)

@ -2192,6 +2212,8 @@ static int sja1105_probe(struct spi_device *spi)
	}
	mutex_init(&priv->mgmt_lock);

	sja1105_tas_setup(ds);

	return dsa_register_switch(priv->ds);
}

--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@ -371,6 +371,63 @@ size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
	return size;
}

static size_t
sja1105_schedule_entry_points_params_entry_packing(void *buf, void *entry_ptr,
						   enum packing_op op)
{
	struct sja1105_schedule_entry_points_params_entry *entry = entry_ptr;
	const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY;

	sja1105_packing(buf, &entry->clksrc,    31, 30, size, op);
	sja1105_packing(buf, &entry->actsubsch, 29, 27, size, op);
	return size;
}

static size_t
sja1105_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
					    enum packing_op op)
{
	struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
	const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;

	sja1105_packing(buf, &entry->subschindx, 31, 29, size, op);
	sja1105_packing(buf, &entry->delta,      28, 11, size, op);
	sja1105_packing(buf, &entry->address,    10, 1,  size, op);
	return size;
}

static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
						    enum packing_op op)
{
	const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
	struct sja1105_schedule_params_entry *entry = entry_ptr;
	int offset, i;

	for (i = 0, offset = 16; i < 8; i++, offset += 10)
		sja1105_packing(buf, &entry->subscheind[i],
				offset + 9, offset + 0, size, op);
	return size;
}

static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
					     enum packing_op op)
{
	const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY;
	struct sja1105_schedule_entry *entry = entry_ptr;

	sja1105_packing(buf, &entry->winstindex,  63, 54, size, op);
	sja1105_packing(buf, &entry->winend,      53, 53, size, op);
	sja1105_packing(buf, &entry->winst,       52, 52, size, op);
	sja1105_packing(buf, &entry->destports,   51, 47, size, op);
	sja1105_packing(buf, &entry->setvalid,    46, 46, size, op);
	sja1105_packing(buf, &entry->txen,        45, 45, size, op);
	sja1105_packing(buf, &entry->resmedia_en, 44, 44, size, op);
	sja1105_packing(buf, &entry->resmedia,    43, 36, size, op);
	sja1105_packing(buf, &entry->vlindex,     35, 26, size, op);
	sja1105_packing(buf, &entry->delta,       25, 8,  size, op);
	return size;
}
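/* Each schedule entry above packs one 8-byte (SJA1105_SIZE_SCHEDULE_ENTRY)
 * Gate Control List timeslot; the 18-bit delta field (bits 25:8) holds the
 * timeslot duration in the 200 ns ticks produced by ns_to_sja1105_delta()
 * in sja1105_tas.c below.
 */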

size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
					 enum packing_op op)
{

@ -447,11 +504,15 @@ static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
 * before blindly indexing kernel memory with the blk_idx.
 */
static u64 blk_id_map[BLK_IDX_MAX] = {
	[BLK_IDX_SCHEDULE] = BLKID_SCHEDULE,
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = BLKID_SCHEDULE_ENTRY_POINTS,
	[BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
	[BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
	[BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
	[BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING,
	[BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
	[BLK_IDX_SCHEDULE_PARAMS] = BLKID_SCHEDULE_PARAMS,
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = BLKID_SCHEDULE_ENTRY_POINTS_PARAMS,
	[BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
	[BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
	[BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,

@ -461,6 +522,13 @@ static u64 blk_id_map[BLK_IDX_MAX] = {

const char *sja1105_static_config_error_msg[] = {
	[SJA1105_CONFIG_OK] = "",
	[SJA1105_TTETHERNET_NOT_SUPPORTED] =
		"schedule-table present, but TTEthernet is "
		"only supported on T and Q/S",
	[SJA1105_INCORRECT_TTETHERNET_CONFIGURATION] =
		"schedule-table present, but one of "
		"schedule-entry-points-table, schedule-parameters-table or "
		"schedule-entry-points-parameters table is empty",
	[SJA1105_MISSING_L2_POLICING_TABLE] =
		"l2-policing-table needs to have at least one entry",
	[SJA1105_MISSING_L2_FORWARDING_TABLE] =

@ -508,6 +576,21 @@ sja1105_static_config_check_valid(const struct sja1105_static_config *config)
#define IS_FULL(blk_idx) \
	(tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)

	if (tables[BLK_IDX_SCHEDULE].entry_count) {
		if (config->device_id != SJA1105T_DEVICE_ID &&
		    config->device_id != SJA1105QS_DEVICE_ID)
			return SJA1105_TTETHERNET_NOT_SUPPORTED;

		if (tables[BLK_IDX_SCHEDULE_ENTRY_POINTS].entry_count == 0)
			return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;

		if (!IS_FULL(BLK_IDX_SCHEDULE_PARAMS))
			return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;

		if (!IS_FULL(BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS))
			return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
	}

	if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
		return SJA1105_MISSING_L2_POLICING_TABLE;

@ -614,6 +697,8 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config)

/* SJA1105E: First generation, no TTEthernet */
struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
	[BLK_IDX_SCHEDULE] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
	[BLK_IDX_L2_LOOKUP] = {
		.packing = sja1105et_l2_lookup_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),

@ -644,6 +729,8 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
		.packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
		.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
	},
	[BLK_IDX_SCHEDULE_PARAMS] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
	[BLK_IDX_L2_LOOKUP_PARAMS] = {
		.packing = sja1105et_l2_lookup_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),

@ -678,6 +765,18 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {

/* SJA1105T: First generation, TTEthernet */
struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
	[BLK_IDX_SCHEDULE] = {
		.packing = sja1105_schedule_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
	},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
		.packing = sja1105_schedule_entry_points_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
	},
	[BLK_IDX_L2_LOOKUP] = {
		.packing = sja1105et_l2_lookup_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),

@ -708,6 +807,18 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
		.packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
		.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
	},
	[BLK_IDX_SCHEDULE_PARAMS] = {
		.packing = sja1105_schedule_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
	},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
		.packing = sja1105_schedule_entry_points_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
	},
	[BLK_IDX_L2_LOOKUP_PARAMS] = {
		.packing = sja1105et_l2_lookup_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),

@ -742,6 +853,8 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {

/* SJA1105P: Second generation, no TTEthernet, no SGMII */
struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
	[BLK_IDX_SCHEDULE] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
	[BLK_IDX_L2_LOOKUP] = {
		.packing = sja1105pqrs_l2_lookup_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),

@ -772,6 +885,8 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
		.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
		.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
	},
	[BLK_IDX_SCHEDULE_PARAMS] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
	[BLK_IDX_L2_LOOKUP_PARAMS] = {
		.packing = sja1105pqrs_l2_lookup_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),

@ -806,6 +921,18 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {

/* SJA1105Q: Second generation, TTEthernet, no SGMII */
struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
	[BLK_IDX_SCHEDULE] = {
		.packing = sja1105_schedule_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
	},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
		.packing = sja1105_schedule_entry_points_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
	},
	[BLK_IDX_L2_LOOKUP] = {
		.packing = sja1105pqrs_l2_lookup_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),

@ -836,6 +963,18 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
		.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
		.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
	},
	[BLK_IDX_SCHEDULE_PARAMS] = {
		.packing = sja1105_schedule_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
	},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
		.packing = sja1105_schedule_entry_points_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
	},
	[BLK_IDX_L2_LOOKUP_PARAMS] = {
		.packing = sja1105pqrs_l2_lookup_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),

@ -870,6 +1009,8 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {

/* SJA1105R: Second generation, no TTEthernet, SGMII */
struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
	[BLK_IDX_SCHEDULE] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
	[BLK_IDX_L2_LOOKUP] = {
		.packing = sja1105pqrs_l2_lookup_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),

@ -900,6 +1041,8 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
		.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
		.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
	},
	[BLK_IDX_SCHEDULE_PARAMS] = {0},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
	[BLK_IDX_L2_LOOKUP_PARAMS] = {
		.packing = sja1105pqrs_l2_lookup_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),

@ -934,6 +1077,18 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {

/* SJA1105S: Second generation, TTEthernet, SGMII */
struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
	[BLK_IDX_SCHEDULE] = {
		.packing = sja1105_schedule_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
	},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
		.packing = sja1105_schedule_entry_points_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
	},
	[BLK_IDX_L2_LOOKUP] = {
		.packing = sja1105pqrs_l2_lookup_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),

@ -964,6 +1119,18 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
		.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
		.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
	},
	[BLK_IDX_SCHEDULE_PARAMS] = {
		.packing = sja1105_schedule_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
	},
	[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
		.packing = sja1105_schedule_entry_points_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
		.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
		.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
	},
	[BLK_IDX_L2_LOOKUP_PARAMS] = {
		.packing = sja1105pqrs_l2_lookup_params_entry_packing,
		.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),

--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@ -11,11 +11,15 @@

#define SJA1105_SIZE_DEVICE_ID				4
#define SJA1105_SIZE_TABLE_HEADER			12
#define SJA1105_SIZE_SCHEDULE_ENTRY			8
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY	4
#define SJA1105_SIZE_L2_POLICING_ENTRY			8
#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY			8
#define SJA1105_SIZE_L2_FORWARDING_ENTRY		8
#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY		12
#define SJA1105_SIZE_XMII_PARAMS_ENTRY			4
#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY		12
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY	4
#define SJA1105ET_SIZE_L2_LOOKUP_ENTRY			12
#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY			28
#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY		4

@ -29,11 +33,15 @@

/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
enum {
	BLKID_SCHEDULE				= 0x00,
	BLKID_SCHEDULE_ENTRY_POINTS		= 0x01,
	BLKID_L2_LOOKUP				= 0x05,
	BLKID_L2_POLICING			= 0x06,
	BLKID_VLAN_LOOKUP			= 0x07,
	BLKID_L2_FORWARDING			= 0x08,
	BLKID_MAC_CONFIG			= 0x09,
	BLKID_SCHEDULE_PARAMS			= 0x0A,
	BLKID_SCHEDULE_ENTRY_POINTS_PARAMS	= 0x0B,
	BLKID_L2_LOOKUP_PARAMS			= 0x0D,
	BLKID_L2_FORWARDING_PARAMS		= 0x0E,
	BLKID_AVB_PARAMS			= 0x10,

@ -42,11 +50,15 @@ enum {
};

enum sja1105_blk_idx {
-	BLK_IDX_L2_LOOKUP = 0,
+	BLK_IDX_SCHEDULE = 0,
+	BLK_IDX_SCHEDULE_ENTRY_POINTS,
+	BLK_IDX_L2_LOOKUP,
	BLK_IDX_L2_POLICING,
	BLK_IDX_VLAN_LOOKUP,
	BLK_IDX_L2_FORWARDING,
	BLK_IDX_MAC_CONFIG,
	BLK_IDX_SCHEDULE_PARAMS,
	BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS,
	BLK_IDX_L2_LOOKUP_PARAMS,
	BLK_IDX_L2_FORWARDING_PARAMS,
	BLK_IDX_AVB_PARAMS,

@ -59,11 +71,15 @@ enum sja1105_blk_idx {
	BLK_IDX_INVAL = -1,
};

#define SJA1105_MAX_SCHEDULE_COUNT			1024
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT		2048
#define SJA1105_MAX_L2_LOOKUP_COUNT			1024
#define SJA1105_MAX_L2_POLICING_COUNT			45
#define SJA1105_MAX_VLAN_LOOKUP_COUNT			4096
#define SJA1105_MAX_L2_FORWARDING_COUNT			13
#define SJA1105_MAX_MAC_CONFIG_COUNT			5
#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT		1
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT	1
#define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT		1
#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT		1
#define SJA1105_MAX_GENERAL_PARAMS_COUNT		1

@ -83,6 +99,23 @@ enum sja1105_blk_idx {
#define SJA1105R_PART_NO				0x9A86
#define SJA1105S_PART_NO				0x9A87

struct sja1105_schedule_entry {
	u64 winstindex;
	u64 winend;
	u64 winst;
	u64 destports;
	u64 setvalid;
	u64 txen;
	u64 resmedia_en;
	u64 resmedia;
	u64 vlindex;
	u64 delta;
};

struct sja1105_schedule_params_entry {
	u64 subscheind[8];
};

struct sja1105_general_params_entry {
	u64 vllupformat;
	u64 mirr_ptacu;

@ -112,6 +145,17 @@ struct sja1105_general_params_entry {
	u64 replay_port;
};

struct sja1105_schedule_entry_points_entry {
	u64 subschindx;
	u64 delta;
	u64 address;
};

struct sja1105_schedule_entry_points_params_entry {
	u64 clksrc;
	u64 actsubsch;
};

struct sja1105_vlan_lookup_entry {
	u64 ving_mirr;
	u64 vegr_mirr;

@ -256,6 +300,8 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config);

typedef enum {
	SJA1105_CONFIG_OK = 0,
	SJA1105_TTETHERNET_NOT_SUPPORTED,
	SJA1105_INCORRECT_TTETHERNET_CONFIGURATION,
	SJA1105_MISSING_L2_POLICING_TABLE,
	SJA1105_MISSING_L2_FORWARDING_TABLE,
	SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE,

--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@ -0,0 +1,423 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include "sja1105.h"

#define SJA1105_TAS_CLKSRC_DISABLED	0
#define SJA1105_TAS_CLKSRC_STANDALONE	1
#define SJA1105_TAS_CLKSRC_AS6802	2
#define SJA1105_TAS_CLKSRC_PTP		3
#define SJA1105_TAS_MAX_DELTA		BIT(19)
#define SJA1105_GATE_MASK		GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
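/* Of the four clock sources defined above, the driver currently programs only
 * SJA1105_TAS_CLKSRC_STANDALONE (see sja1105_init_scheduling() below); per the
 * cover letter, the PTP clock source is to follow once the offload model
 * settles.
 */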

/* This is not a preprocessor macro because the "ns" argument may or may not be
 * s64 at caller side. This ensures it is properly type-cast before div_s64.
 */
static s64 ns_to_sja1105_delta(s64 ns)
{
	return div_s64(ns, 200);
}
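/* Worked example: a 100 us (100000 ns) taprio interval translates to
 * 100000 / 200 = 500 ticks of the switch's 200 ns scheduling clock.
 */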

/* Lo and behold: the egress scheduler from hell.
 *
 * At the hardware level, the Time-Aware Shaper holds a global linear array of
 * all schedule entries for all ports. These are the Gate Control List (GCL)
 * entries, let's call them "timeslots" for short. This linear array of
 * timeslots is held in BLK_IDX_SCHEDULE.
 *
 * Then there are a maximum of 8 "execution threads" inside the switch, which
 * iterate cyclically through the "schedule". Each "cycle" has an entry point
 * and an exit point, both being timeslot indices in the schedule table. The
 * hardware calls each cycle a "subschedule".
 *
 * Subschedule (cycle) i starts when
 *   ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
 *
 * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
 *   k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
 *   k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
 *
 * For each schedule entry (timeslot) k, the engine executes the gate control
 * list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
 *
 *         +---------+
 *         |         |  BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
 *         +---------+
 *              |
 *              +-----------------+
 *                                | .actsubsch
 *  BLK_IDX_SCHEDULE_ENTRY_POINTS v
 *                 +-------+-------+
 *                 |cycle 0|cycle 1|
 *                 +-------+-------+
 *                   |  |      |  |
 *  +----------------+  |      |  +-------------------------------------+
 *  |   .subschindx     |      |             .subschindx                |
 *  |                   |      +---------------+                        |
 *  |          .address |        .address      |                        |
 *  |                   |                      |                        |
 *  |                   |                      |                        |
 *  |  BLK_IDX_SCHEDULE v                      v                        |
 *  |  +-------+-------+-------+-------+-------+------+                 |
 *  |  |entry 0|entry 1|entry 2|entry 3|entry 4|entry5|                 |
 *  |  +-------+-------+-------+-------+-------+------+                 |
 *  |                  ^          ^            ^      ^                 |
 *  |                  |          |            |      |                 |
 *  |  +---------------+          |            |      |                 |
 *  |  |       +------------------+            |      |                 |
 *  |  |       |       +-----------------------+      |                 |
 *  |  |       |       |       +----------------------+                 |
 *  | +---------------------------------------------------------------+ |
 *  | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| |
 *  | +---------------------------------------------------------------+ |
 *  |      ^         ^                       BLK_IDX_SCHEDULE_PARAMS    |
 *  |      |         |                                                  |
 *  +------+         +--------------------------------------------------+
 *
 * In the above picture there are two subschedules (cycles):
 *
 * - cycle 0: iterates the schedule table from 0 to 2 (and back)
 * - cycle 1: iterates the schedule table from 3 to 5 (and back)
 *
 * All other possible execution threads must be marked as unused by making
 * their "subschedule end index" (subscheind) equal to the last valid
 * subschedule's end index (in this case 5).
 */
static int sja1105_init_scheduling(struct sja1105_private *priv)
{
	struct sja1105_schedule_entry_points_entry *schedule_entry_points;
	struct sja1105_schedule_entry_points_params_entry
					*schedule_entry_points_params;
	struct sja1105_schedule_params_entry *schedule_params;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_schedule_entry *schedule;
	struct sja1105_table *table;
	int schedule_start_idx;
	s64 entry_point_delta;
	int schedule_end_idx;
	int num_entries = 0;
	int num_cycles = 0;
	int cycle = 0;
	int i, k = 0;
	int port;

	/* Discard previous Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Entry Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Figure out the dimensioning of the problem */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		if (tas_data->offload[port]) {
			num_entries += tas_data->offload[port]->num_entries;
			num_cycles++;
		}
	}

	/* Nothing to do */
	if (!num_cycles)
		return 0;

	/* Pre-allocate space in the static config tables */

	/* Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_entries;
	schedule = table->entries;

	/* Schedule Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		/* Previously allocated memory will be freed automatically in
		 * sja1105_static_config_free. This is true for all early
		 * returns below.
		 */
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
	schedule_entry_points_params = table->entries;

	/* Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
	schedule_params = table->entries;

	/* Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_cycles;
	schedule_entry_points = table->entries;

	/* Finally start populating the static config tables */
	schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_STANDALONE;
	schedule_entry_points_params->actsubsch = num_cycles - 1;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		const struct tc_taprio_qopt_offload *offload;

		offload = tas_data->offload[port];
		if (!offload)
			continue;

		schedule_start_idx = k;
		schedule_end_idx = k + offload->num_entries - 1;
		/* TODO this is the base time for the port's subschedule,
		 * relative to PTPSCHTM. But as we're using the standalone
		 * clock source and not PTP clock as time reference, there's
		 * little point in even trying to put more logic into this,
		 * like preserving the phases between the subschedules of
		 * different ports. We'll get all of that when switching to the
		 * PTP clock source.
		 */
		entry_point_delta = 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		/* The subschedule end indices need to be
		 * monotonically increasing.
		 */
		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

		for (i = 0; i < offload->num_entries; i++, k++) {
			s64 delta_ns = offload->entries[i].interval;

			schedule[k].delta = ns_to_sja1105_delta(delta_ns);
			schedule[k].destports = BIT(port);
			schedule[k].resmedia_en = true;
			schedule[k].resmedia = SJA1105_GATE_MASK &
					~offload->entries[i].gate_mask;
		}
		cycle++;
	}

	return 0;
}

/* Be there 2 port subschedules, each executing an arbitrary number of gate
 * open/close events cyclically.
 * None of those gate events must ever occur at the exact same time, otherwise
 * the switch is known to act in exotically strange ways.
 * However the hardware doesn't bother performing these integrity checks.
 * So here we are with the task of validating whether the new @admin offload
 * has any conflict with the already established TAS configuration in
 * tas_data->offload. We already know the other ports are in harmony with one
 * another, otherwise we wouldn't have saved them.
 * Each gate event executes periodically, with a period of @cycle_time and a
 * phase given by its cycle's @base_time plus its offset within the cycle
 * (which in turn is given by the length of the events prior to it).
 * There are two aspects to possible collisions:
 * - Collisions within one cycle's (actually the longest cycle's) time frame.
 *   For that, we need to compare the cartesian product of each possible
 *   occurrence of each event within one cycle time.
 * - Collisions in the future. Events may not collide within one cycle time,
 *   but if two port schedules don't have the same periodicity (aka the cycle
 *   times aren't multiples of one another), they surely will some time in the
 *   future (actually they will collide an infinite amount of times).
 */
static bool
sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
			    const struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	const struct tc_taprio_qopt_offload *offload;
	s64 max_cycle_time, min_cycle_time;
	s64 delta1, delta2;
	s64 rbt1, rbt2;
	s64 stop_time;
	s64 t1, t2;
	int i, j;
	s32 rem;

	offload = tas_data->offload[port];
	if (!offload)
		return false;

	/* Check if the two cycle times are multiples of one another.
	 * If they aren't, then they will surely collide.
	 */
	max_cycle_time = max(offload->cycle_time, admin->cycle_time);
	min_cycle_time = min(offload->cycle_time, admin->cycle_time);
	div_s64_rem(max_cycle_time, min_cycle_time, &rem);
	if (rem)
		return true;

	/* Calculate the "reduced" base time of each of the two cycles
	 * (transposed back as close to 0 as possible) by dividing to
	 * the cycle time.
	 */
	div_s64_rem(offload->base_time, offload->cycle_time, &rem);
	rbt1 = rem;

	div_s64_rem(admin->base_time, admin->cycle_time, &rem);
	rbt2 = rem;

	stop_time = max_cycle_time + max(rbt1, rbt2);

	/* delta1 is the relative base time of each GCL entry within
	 * the established ports' TAS config.
	 */
	for (i = 0, delta1 = 0;
	     i < offload->num_entries;
	     delta1 += offload->entries[i].interval, i++) {
		/* delta2 is the relative base time of each GCL entry
		 * within the newly added TAS config.
		 */
		for (j = 0, delta2 = 0;
		     j < admin->num_entries;
		     delta2 += admin->entries[j].interval, j++) {
			/* t1 follows all possible occurrences of the
			 * established ports' GCL entry i within the
			 * first cycle time.
			 */
			for (t1 = rbt1 + delta1;
			     t1 <= stop_time;
			     t1 += offload->cycle_time) {
				/* t2 follows all possible occurrences
				 * of the newly added GCL entry j
				 * within the first cycle time.
				 */
				for (t2 = rbt2 + delta2;
				     t2 <= stop_time;
				     t2 += admin->cycle_time) {
					if (t1 == t2) {
						dev_warn(priv->ds->dev,
							 "GCL entry %d collides with entry %d of port %d\n",
							 j, i, port);
						return true;
					}
				}
			}
		}
	}

	return false;
}
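/* Worked example of the cycle-time check above: cycle times of 200 us and
 * 100 us pass (rem == 0) and proceed to the per-entry cartesian comparison,
 * while 250 us and 100 us are rejected outright (rem == 50 us), since such
 * schedules are guaranteed to collide eventually.
 */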

int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
			    struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	int other_port, rc, i;

	/* Can't change an already configured port (must delete qdisc first).
	 * Can't delete the qdisc from an unconfigured port.
	 */
	if (!!tas_data->offload[port] == admin->enable)
		return -EINVAL;

	if (!admin->enable) {
		taprio_offload_free(tas_data->offload[port]);
		tas_data->offload[port] = NULL;

		rc = sja1105_init_scheduling(priv);
		if (rc < 0)
			return rc;

		return sja1105_static_config_reload(priv);
	}

	/* The cycle time extension is the amount of time the last cycle from
	 * the old OPER needs to be extended in order to phase-align with the
	 * base time of the ADMIN when that becomes the new OPER.
	 * But of course our switch needs to be reset to switch-over between
	 * the ADMIN and the OPER configs - so much for a seamless transition.
	 * So don't add insult over injury and just say we don't support cycle
	 * time extension.
	 */
	if (admin->cycle_time_extension)
		return -ENOTSUPP;

	if (!ns_to_sja1105_delta(admin->base_time)) {
		dev_err(ds->dev, "A base time of zero is not hardware-allowed\n");
		return -ERANGE;
	}

	for (i = 0; i < admin->num_entries; i++) {
		s64 delta_ns = admin->entries[i].interval;
		s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
		bool too_long, too_short;

		too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
		too_short = (delta_cycles == 0);
		if (too_long || too_short) {
			dev_err(priv->ds->dev,
				"Interval %llu too %s for GCL entry %d\n",
				delta_ns, too_long ? "long" : "short", i);
			return -ERANGE;
		}
	}

	for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) {
		if (other_port == port)
			continue;

		if (sja1105_tas_check_conflicts(priv, other_port, admin))
			return -ERANGE;
	}

	tas_data->offload[port] = taprio_offload_get(admin);

	rc = sja1105_init_scheduling(priv);
	if (rc < 0)
		return rc;

	return sja1105_static_config_reload(priv);
}

void sja1105_tas_setup(struct dsa_switch *ds)
{
}

void sja1105_tas_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct tc_taprio_qopt_offload *offload;
	int port;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		offload = priv->tas_data.offload[port];
		if (!offload)
			continue;

		taprio_offload_free(offload);
	}
}

--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@ -0,0 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#ifndef _SJA1105_TAS_H
#define _SJA1105_TAS_H

#include <net/pkt_sched.h>

#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)

struct sja1105_tas_data {
	struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
};

int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
			    struct tc_taprio_qopt_offload *admin);

void sja1105_tas_setup(struct dsa_switch *ds);

void sja1105_tas_teardown(struct dsa_switch *ds);

#else

/* C doesn't allow empty structures, bah! */
struct sja1105_tas_data {
	u8 dummy;
};

static inline int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
					  struct tc_taprio_qopt_offload *admin)
{
	return -EOPNOTSUPP;
}

static inline void sja1105_tas_setup(struct dsa_switch *ds) { }

static inline void sja1105_tas_teardown(struct dsa_switch *ds) { }

#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */

#endif /* _SJA1105_TAS_H */
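This header uses the usual kernel pattern for optional features: when
CONFIG_NET_DSA_SJA1105_TAS is disabled, the static inline stubs compile away
entirely, so sja1105_main.c can call sja1105_tas_setup() and
sja1105_tas_teardown() unconditionally, without any #ifdef at the call sites.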

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@ -847,6 +847,7 @@ enum tc_setup_type {
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
};

/* These structures hold the attributes of bpf state that are being passed

--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@ -515,6 +515,8 @@ struct dsa_switch_ops {
				   bool ingress);
	void	(*port_mirror_del)(struct dsa_switch *ds, int port,
				   struct dsa_mall_mirror_tc_entry *mirror);
	int	(*port_setup_tc)(struct dsa_switch *ds, int port,
				 enum tc_setup_type type, void *type_data);

	/*
	 * Cross-chip operations

--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@ -161,4 +161,27 @@ struct tc_etf_qopt_offload {
	s32 queue;
};

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask in the offloading side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	u8 enable;
	ktime_t base_time;
	u64 cycle_time;
	u64 cycle_time_extension;

	size_t num_entries;
	struct tc_taprio_sched_entry entries[0];
};

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

#endif

--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@ -1160,7 +1160,8 @@ enum {
 * [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
 */

-#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST 0x1
+#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST	BIT(0)
+#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD	BIT(1)
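The 'flags 2' argument in the documentation example above is this
TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD bit (BIT(1) == 0x2), while 'flags 1' selects
txtime-assist mode; taprio_flags_valid() in sch_taprio.c rejects setting both
at once.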

enum {
	TCA_TAPRIO_ATTR_UNSPEC,

--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@ -1035,12 +1035,16 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
-	switch (type) {
-	case TC_SETUP_BLOCK:
+	struct dsa_port *dp = dsa_slave_to_port(dev);
+	struct dsa_switch *ds = dp->ds;
+
+	if (type == TC_SETUP_BLOCK)
		return dsa_slave_setup_tc_block(dev, type_data);
-	default:
-		return -EOPNOTSUPP;
-	}
+
+	if (!ds->ops->port_setup_tc)
+		return -EOPNOTSUPP;
+
+	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static void dsa_slave_get_stats64(struct net_device *dev,

--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@ -89,7 +89,8 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	struct dsa_switch *ds = dp->ds;
	u16 tx_vid = dsa_8021q_tx_vid(ds, dp->index);
-	u8 pcp = skb->priority;
+	u16 queue_mapping = skb_get_queue_mapping(skb);
+	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);

	/* Transmitting management traffic does not rely upon switch tagging,
	 * but instead SPI-installed management routes. Part 2 of this

--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@ -29,8 +29,8 @@ static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

-#define FLAGS_VALID(flags) (!((flags) & ~TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST))
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
+#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)

struct sched_entry {
	struct list_head list;

@ -75,9 +75,16 @@ struct taprio_sched {
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)

@ -268,6 +275,19 @@ static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{

@ -417,7 +437,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	return qdisc_enqueue(skb, child, to_free);
}

-static struct sk_buff *taprio_peek(struct Qdisc *sch)
+static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

@ -461,6 +481,36 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch)
	return NULL;
}

static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}

static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,

@ -468,7 +518,7 @@ static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
		   atomic64_read(&q->picos_per_byte)));
}

-static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
+static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

@ -550,6 +600,40 @@ done:
	return skb;
}

static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			continue;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{

@ -932,6 +1016,9 @@ static void taprio_start_sched(struct Qdisc *sch,
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

@ -1011,6 +1098,254 @@ static void setup_txtime(struct taprio_sched *q,
	}
}

static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	size_t size = sizeof(struct tc_taprio_sched_entry) * num_entries +
		      sizeof(struct __tc_taprio_qopt_offload);
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(size, GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);
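The get/free pair above gives drivers shared ownership of the offload
structure. A minimal, hypothetical sketch of the driver-side contract
(foo_priv and its taprio array are invented names; the pattern mirrors
sja1105_setup_tc_taprio() earlier in this series):

/* Hypothetical driver callback: keep a reference for as long as the
 * hardware consumes the schedule, and drop it exactly once on teardown.
 */
static int foo_port_setup_taprio(struct foo_priv *priv, int port,
				 struct tc_taprio_qopt_offload *admin)
{
	if (!admin->enable) {
		taprio_offload_free(priv->taprio[port]); /* users-- */
		priv->taprio[port] = NULL;
		return 0;
	}

	priv->taprio[port] = taprio_offload_get(admin); /* users++ */
	return 0;
}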
/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so that when dump() is
 * called the user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}

static void taprio_sched_to_offload(struct taprio_sched *q,
				    struct sched_gate_list *sched,
				    const struct tc_mqprio_qopt *mqprio,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = entry->gate_mask;
		i++;
	}

	offload->num_entries = i;
}

static int taprio_enable_offload(struct net_device *dev,
				 struct tc_mqprio_qopt *mqprio,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(q, sched, mqprio, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

	taprio_offload_config_changed(q);

done:
	taprio_offload_free(offload);

	return err;
}
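
On the driver side, this offload arrives through ->ndo_setup_tc() with type
TC_SETUP_QDISC_TAPRIO. A sketch of a handler is shown below; my_priv,
my_reset_schedule(), my_program_gate() and my_apply_schedule() are
hypothetical placeholders, not part of this patch:

	static int my_ndo_setup_tc(struct net_device *dev,
				   enum tc_setup_type type, void *type_data)
	{
		struct tc_taprio_qopt_offload *offload = type_data;
		struct my_priv *priv = netdev_priv(dev);
		int i;

		if (type != TC_SETUP_QDISC_TAPRIO)
			return -EOPNOTSUPP;

		/* enable == 0 is the taprio_disable_offload() path */
		if (!offload->enable)
			return my_reset_schedule(priv);

		/* Walk the flat array built by taprio_sched_to_offload() */
		for (i = 0; i < offload->num_entries; i++) {
			const struct tc_taprio_sched_entry *e =
				&offload->entries[i];

			if (my_program_gate(priv, e->gate_mask, e->interval))
				return -EINVAL;
		}

		return my_apply_schedule(priv, offload->base_time,
					 offload->cycle_time);
	}
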
static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
		return 0;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

out:
	taprio_offload_free(offload);

	return err;
}
/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			q->tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			q->tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			q->tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			q->tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}
out:
	return err;
}
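
The tk_offset value chosen above is what lets the qdisc translate a
monotonic timestamp into the configured clock domain; TK_OFFS_MAX acts as a
sentinel meaning "no offset to apply" for CLOCK_MONOTONIC. A sketch of how
it is typically consumed, modeled on taprio's existing taprio_get_time()
helper (shown here under that assumption, not part of this hunk):

	static ktime_t taprio_get_time(struct taprio_sched *q)
	{
		ktime_t mono = ktime_get();

		switch (q->tk_offset) {
		case TK_OFFS_MAX:
			/* CLOCK_MONOTONIC: nothing to add */
			return mono;
		default:
			/* Shift into REALTIME/BOOTTIME/TAI as configured */
			return ktime_mono_to_any(mono, q->tk_offset);
		}
	}
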
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{

@@ -1020,9 +1355,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	u32 taprio_flags = 0;
	int i, err, clockid;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);

@@ -1038,7 +1373,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
	if (q->flags != 0 && q->flags != taprio_flags) {
		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	} else if (!FLAGS_VALID(taprio_flags)) {
	} else if (!taprio_flags_valid(taprio_flags)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
		return -EINVAL;
	}

@@ -1078,30 +1413,19 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
		goto free_sched;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack, "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto free_sched;
		}

		q->clockid = clockid;
	}

	if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		err = -EINVAL;
	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;
	}

	taprio_set_picos_per_byte(dev, q);

	if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
		err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

@@ -1116,6 +1440,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
	}

	if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(taprio_flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;

@@ -1134,23 +1459,15 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
				    mqprio->prio_tc_map[i]);
	}

	switch (q->clockid) {
	case CLOCK_REALTIME:
		q->tk_offset = TK_OFFS_REAL;
		break;
	case CLOCK_MONOTONIC:
		q->tk_offset = TK_OFFS_MAX;
		break;
	case CLOCK_BOOTTIME:
		q->tk_offset = TK_OFFS_BOOT;
		break;
	case CLOCK_TAI:
		q->tk_offset = TK_OFFS_TAI;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
		err = -EINVAL;
		goto unlock;
	if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);

@@ -1212,6 +1529,8 @@ static void taprio_destroy(struct Qdisc *sch)

	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

@@ -1241,6 +1560,9 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default

@@ -1423,7 +1745,8 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))