mlx5-updates-2020-04-20

Merge tag 'mlx5-updates-2020-04-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-04-20

This series includes misc updates and cleanups to the mlx5 driver:

1) Improve some comments, from Hu Haowen.
2) Handle errors of netif_set_real_num_{tx,rx}_queues, from Maxim.
3) IPsec and FPGA related code cleanup to prepare for IPsec offloads on ASIC devices, from Raed.
4) Allow partial mask for tunnel options, from Roi.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 80ad41f28a
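For context on item 2 above: netif_set_real_num_tx_queues() and netif_set_real_num_rx_queues() can fail, and this series makes the driver propagate that failure instead of ignoring it. A minimal sketch of the rollback pattern involved (the helper name and variables here are illustrative placeholders, not the driver's exact code):

#include <linux/netdevice.h>

/* Sketch: apply the TX queue count first, then RX; if the RX update fails,
 * restore the previous TX count before returning the error.
 */
static int example_update_queue_counts(struct net_device *netdev,
				       unsigned int num_txqs,
				       unsigned int num_rxqs)
{
	unsigned int old_num_txqs = netdev->real_num_tx_queues;
	int err;

	err = netif_set_real_num_tx_queues(netdev, num_txqs);
	if (err)
		return err;

	err = netif_set_real_num_rx_queues(netdev, num_rxqs);
	if (err) {
		/* Shrinking the TX count back to its old value should not fail. */
		WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
		return err;
	}

	return 0;
}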
@@ -58,12 +58,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
 
 void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
 				       struct mlx5_accel_esp_xfrm *xfrm,
-				       const __be32 saddr[4],
-				       const __be32 daddr[4],
-				       const __be32 spi, bool is_ipv6)
+				       u32 *sa_handle)
 {
-	return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr,
-					     spi, is_ipv6);
+	__be32 saddr[4] = {}, daddr[4] = {};
+
+	if (!xfrm->attrs.is_ipv6) {
+		saddr[3] = xfrm->attrs.saddr.a4;
+		daddr[3] = xfrm->attrs.daddr.a4;
+	} else {
+		memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
+		memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
+	}
+
+	return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr,
+					     daddr, xfrm->attrs.spi,
+					     xfrm->attrs.is_ipv6, sa_handle);
 }
 
 void mlx5_accel_esp_free_hw_context(void *context)
@@ -48,9 +48,7 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
 
 void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
 				       struct mlx5_accel_esp_xfrm *xfrm,
-				       const __be32 saddr[4],
-				       const __be32 daddr[4],
-				       const __be32 spi, bool is_ipv6);
+				       u32 *sa_handle);
 void mlx5_accel_esp_free_hw_context(void *context);
 
 int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
@@ -64,9 +62,7 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
 static inline void *
 mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
 				 struct mlx5_accel_esp_xfrm *xfrm,
-				 const __be32 saddr[4],
-				 const __be32 daddr[4],
-				 const __be32 spi, bool is_ipv6)
+				 u32 *sa_handle)
 {
 	return NULL;
 }
@@ -684,7 +684,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
 		get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);
 
 	while (block_timestamp > tracer->last_timestamp) {
-		/* Check block override if its not the first block */
+		/* Check block override if it's not the first block */
 		if (!tracer->last_timestamp) {
 			u64 *ts_event;
 			/* To avoid block override be the HW in case of buffer
@@ -102,7 +102,7 @@ static inline void
 mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
 		struct mlx5_wqe_ctrl_seg *ctrl)
 {
-	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
 	/* ensure wqe is visible to device before updating doorbell record */
 	dma_wmb();
 
@@ -42,6 +42,8 @@
 #include "en/txrx.h"
 
 #if IS_ENABLED(CONFIG_GENEVE)
+#include <net/geneve.h>
+
 static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
 {
 	return mlx5_tx_swp_supported(mdev);
@@ -75,18 +75,23 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
 	return ret;
 }
 
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
+static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
+				   unsigned int handle)
 {
 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5e_ipsec_sa_entry *_sa_entry;
 	unsigned long flags;
-	int ret;
 
-	ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
-	if (ret < 0)
-		return ret;
+	rcu_read_lock();
+	hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
+		if (_sa_entry->handle == handle) {
+			rcu_read_unlock();
+			return -EEXIST;
+		}
+	rcu_read_unlock();
 
 	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
-	sa_entry->handle = ret;
+	sa_entry->handle = handle;
 	hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
 	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
 
@@ -103,15 +108,6 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
 	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
 }
 
-static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
-	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-
-	/* xfrm already doing sync rcu between del and free callbacks */
-
-	ida_simple_remove(&ipsec->halloc, sa_entry->handle);
-}
-
 static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	struct xfrm_replay_state_esn *replay_esn;
@@ -199,6 +195,14 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 	attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
 			MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
 			MLX5_ACCEL_ESP_FLAGS_TUNNEL;
+
+	/* spi */
+	attrs->spi = x->id.spi;
+
+	/* source , destination ips */
+	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
+	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
+	attrs->is_ipv6 = (x->props.family != AF_INET);
 }
 
 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -284,8 +288,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	struct net_device *netdev = x->xso.dev;
 	struct mlx5_accel_esp_xfrm_attrs attrs;
 	struct mlx5e_priv *priv;
-	__be32 saddr[4] = {0}, daddr[4] = {0}, spi;
-	bool is_ipv6 = false;
+	unsigned int sa_handle;
 	int err;
 
 	priv = netdev_priv(netdev);
@@ -303,20 +306,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	sa_entry->x = x;
 	sa_entry->ipsec = priv->ipsec;
 
-	/* Add the SA to handle processed incoming packets before the add SA
-	 * completion was received
-	 */
-	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
-		err = mlx5e_ipsec_sadb_rx_add(sa_entry);
-		if (err) {
-			netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
-			goto err_entry;
-		}
-	} else {
-		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
-				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
-	}
-
 	/* check esn */
 	mlx5e_ipsec_update_esn_state(sa_entry);
 
@@ -327,41 +316,38 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 					  MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
 	if (IS_ERR(sa_entry->xfrm)) {
 		err = PTR_ERR(sa_entry->xfrm);
-		goto err_sadb_rx;
+		goto err_sa_entry;
 	}
 
 	/* create hw context */
-	if (x->props.family == AF_INET) {
-		saddr[3] = x->props.saddr.a4;
-		daddr[3] = x->id.daddr.a4;
-	} else {
-		memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
-		memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
-		is_ipv6 = true;
-	}
-	spi = x->id.spi;
 	sa_entry->hw_context =
 		mlx5_accel_esp_create_hw_context(priv->mdev,
 						 sa_entry->xfrm,
-						 saddr, daddr, spi,
-						 is_ipv6);
+						 &sa_handle);
 	if (IS_ERR(sa_entry->hw_context)) {
 		err = PTR_ERR(sa_entry->hw_context);
 		goto err_xfrm;
 	}
 
+	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+		err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
+		if (err)
+			goto err_hw_ctx;
+	} else {
+		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
+				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
+	}
+
 	x->xso.offload_handle = (unsigned long)sa_entry;
 	goto out;
 
+err_hw_ctx:
+	mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
 err_xfrm:
 	mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
-err_sadb_rx:
-	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
-		mlx5e_ipsec_sadb_rx_del(sa_entry);
-		mlx5e_ipsec_sadb_rx_free(sa_entry);
-	}
-err_entry:
+err_sa_entry:
 	kfree(sa_entry);
 
 out:
 	return err;
 }
@@ -390,9 +376,6 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
 		mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
 	}
 
-	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
-		mlx5e_ipsec_sadb_rx_free(sa_entry);
-
 	kfree(sa_entry);
 }
 
@@ -109,11 +109,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv);
 void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
 
-int mlx5e_ipsec_get_count(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data);
-
 struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
 					      unsigned int handle);
 
@@ -136,26 +131,6 @@ static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 {
 }
 
-static inline int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
-{
-	return 0;
-}
-
-static inline int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv,
-					  uint8_t *data)
-{
-	return 0;
-}
-
-static inline void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
-{
-}
-
-static inline int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
-{
-	return 0;
-}
-
 #endif
 
 #endif /* __MLX5E_IPSEC_H__ */
@@ -38,6 +38,7 @@
 #include "accel/ipsec.h"
 #include "fpga/sdk.h"
 #include "en_accel/ipsec.h"
+#include "fpga/ipsec.h"
 
 static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
@@ -73,61 +74,74 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
 #define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
 #define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
 
-#define NUM_IPSEC_COUNTERS (NUM_IPSEC_HW_COUNTERS + NUM_IPSEC_SW_COUNTERS)
-
-int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
 {
-	if (!priv->ipsec)
-		return 0;
-
-	return NUM_IPSEC_COUNTERS;
+	return NUM_IPSEC_SW_COUNTERS;
 }
 
-int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw)
 {
-	unsigned int i, idx = 0;
+	unsigned int i;
 
-	if (!priv->ipsec)
-		return 0;
-
-	for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       mlx5e_ipsec_hw_stats_desc[i].format);
-
-	for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       mlx5e_ipsec_sw_stats_desc[i].format);
-
-	return NUM_IPSEC_COUNTERS;
+	if (priv->ipsec)
+		for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+			strcpy(data + (idx++) * ETH_GSTRING_LEN,
+			       mlx5e_ipsec_sw_stats_desc[i].format);
+	return idx;
 }
 
-void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
 {
-	int ret;
+	int i;
 
-	if (!priv->ipsec)
-		return;
+	if (priv->ipsec)
+		for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+			data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
+							      mlx5e_ipsec_sw_stats_desc, i);
+	return idx;
+}
 
-	ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
-					     NUM_IPSEC_HW_COUNTERS);
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
+{
+	return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
+{
+	int ret = 0;
+
+	if (priv->ipsec)
+		ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
+						     NUM_IPSEC_HW_COUNTERS);
 	if (ret)
 		memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
 }
 
-int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
 {
-	int i, idx = 0;
+	unsigned int i;
 
-	if (!priv->ipsec)
-		return 0;
-
-	for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
-						   mlx5e_ipsec_hw_stats_desc, i);
-
-	for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
-						      mlx5e_ipsec_sw_stats_desc, i);
-
-	return NUM_IPSEC_COUNTERS;
+	if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
+		for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+			strcpy(data + (idx++) * ETH_GSTRING_LEN,
+			       mlx5e_ipsec_hw_stats_desc[i].format);
+
+	return idx;
 }
 
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
+{
+	int i;
+
+	if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
+		for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+			data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
+							   mlx5e_ipsec_hw_stats_desc,
+							   i);
+	return idx;
+}
+
+MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
+MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
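The hunk above moves the IPsec counters onto the driver's generic stats-group machinery. Conceptually, each group supplies a small set of callbacks (count, fill strings, fill values, update); a simplified stand-alone model of that shape, not the actual MLX5E_*_STATS_GRP macros, is:

#include <linux/types.h>

/* Simplified model of a stats group: a bundle of callbacks that the common
 * ethtool-stats code iterates over. Illustrative only; the real driver
 * generates these signatures through the MLX5E_DECLARE/DEFINE_STATS_GRP macros.
 */
struct example_stats_grp {
	int (*get_num_stats)(void *priv);
	int (*fill_strings)(void *priv, u8 *data, int idx);
	int (*fill_stats)(void *priv, u64 *data, int idx);
	void (*update_stats)(void *priv);
};

Each fill callback returns the next free index, which is why the new ipsec_sw/ipsec_hw fill functions above end with "return idx;".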
@@ -432,7 +432,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		*cur_params = new_channels.params;
-		mlx5e_num_channels_changed(priv);
+		err = mlx5e_num_channels_changed(priv);
 		goto out;
 	}
 
@@ -2839,11 +2839,8 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
 				ETH_MAX_MTU);
 }
 
-static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
 {
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int nch = priv->channels.params.num_channels;
-	int ntc = priv->channels.params.num_tc;
 	int tc;
 
 	netdev_reset_tc(netdev);
@@ -2860,15 +2857,47 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
 		netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
-static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
+static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 {
-	int num_txqs = count * priv->channels.params.num_tc;
-	int num_rxqs = count * priv->profile->rq_groups;
 	struct net_device *netdev = priv->netdev;
+	int num_txqs, num_rxqs, nch, ntc;
+	int old_num_txqs, old_ntc;
+	int err;
 
-	mlx5e_netdev_set_tcs(netdev);
-	netif_set_real_num_tx_queues(netdev, num_txqs);
-	netif_set_real_num_rx_queues(netdev, num_rxqs);
+	old_num_txqs = netdev->real_num_tx_queues;
+	old_ntc = netdev->num_tc;
+
+	nch = priv->channels.params.num_channels;
+	ntc = priv->channels.params.num_tc;
+	num_txqs = nch * ntc;
+	num_rxqs = nch * priv->profile->rq_groups;
+
+	mlx5e_netdev_set_tcs(netdev, nch, ntc);
+
+	err = netif_set_real_num_tx_queues(netdev, num_txqs);
+	if (err) {
+		netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+		goto err_tcs;
+	}
+	err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+	if (err) {
+		netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
+		goto err_txqs;
+	}
+
+	return 0;
+
+err_txqs:
+	/* netif_set_real_num_rx_queues could fail only when nch increased. Only
+	 * one of nch and ntc is changed in this function. That means, the call
+	 * to netif_set_real_num_tx_queues below should not fail, because it
+	 * decreases the number of TX queues.
+	 */
+	WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
+
+err_tcs:
+	mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc);
+	return err;
 }
 
 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
@@ -2895,8 +2924,12 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
 int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
 {
 	u16 count = priv->channels.params.num_channels;
+	int err;
+
+	err = mlx5e_update_netdev_queues(priv);
+	if (err)
+		return err;
 
-	mlx5e_update_netdev_queues(priv, count);
 	mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
 
 	if (!netif_is_rxfh_configured(priv->netdev))
@@ -5358,9 +5391,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 	 */
 	if (take_rtnl)
 		rtnl_lock();
-	mlx5e_num_channels_changed(priv);
+	err = mlx5e_num_channels_changed(priv);
 	if (take_rtnl)
 		rtnl_unlock();
+	if (err)
+		goto out;
 
 	err = profile->init_tx(priv);
 	if (err)
@@ -32,8 +32,8 @@
 
 #include "lib/mlx5.h"
 #include "en.h"
-#include "en_accel/ipsec.h"
 #include "en_accel/tls.h"
+#include "en_accel/en_accel.h"
 
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
 {
@@ -1424,27 +1424,6 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
 
 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
 
-static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
-{
-	return mlx5e_ipsec_get_count(priv);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
-{
-	return idx + mlx5e_ipsec_get_strings(priv,
-					     data + idx * ETH_GSTRING_LEN);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
-{
-	return idx + mlx5e_ipsec_get_stats(priv, data + idx);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
-{
-	mlx5e_ipsec_update_stats(priv);
-}
-
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
 {
 	return mlx5e_tls_get_count(priv);
@@ -1714,7 +1693,6 @@ MLX5E_DEFINE_STATS_GRP(pme, 0);
 MLX5E_DEFINE_STATS_GRP(channels, 0);
 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
-static MLX5E_DEFINE_STATS_GRP(ipsec, 0);
 static MLX5E_DEFINE_STATS_GRP(tls, 0);
 
 /* The stats groups order is opposite to the update_stats() order calls */
@@ -1731,7 +1709,10 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
 	&MLX5E_STATS_GRP(pcie),
 	&MLX5E_STATS_GRP(per_prio),
 	&MLX5E_STATS_GRP(pme),
-	&MLX5E_STATS_GRP(ipsec),
+#ifdef CONFIG_MLX5_EN_IPSEC
+	&MLX5E_STATS_GRP(ipsec_sw),
+	&MLX5E_STATS_GRP(ipsec_hw),
+#endif
 	&MLX5E_STATS_GRP(tls),
 	&MLX5E_STATS_GRP(channels),
 	&MLX5E_STATS_GRP(per_port_buff_congest),
@@ -390,5 +390,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
 extern MLX5E_DECLARE_STATS_GRP(pme);
 extern MLX5E_DECLARE_STATS_GRP(channels);
 extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
 
 #endif /* __MLX5_EN_STATS_H__ */
@@ -171,6 +171,11 @@ struct tunnel_match_key {
 	int filter_ifindex;
 };
 
+struct tunnel_match_enc_opts {
+	struct flow_dissector_key_enc_opts key;
+	struct flow_dissector_key_enc_opts mask;
+};
+
 /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
  * Upper TUNNEL_INFO_BITS for general tunnel info.
  * Lower ENC_OPTS_BITS bits for enc_opts.
@@ -1824,9 +1829,7 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
 			*dont_care = false;
 
 			if (opt->opt_class != U16_MAX ||
-			    opt->type != U8_MAX ||
-			    memchr_inv(opt->opt_data, 0xFF,
-				       opt->length * 4)) {
+			    opt->type != U8_MAX) {
 				NL_SET_ERR_MSG(extack,
 					       "Partial match of tunnel options in chain > 0 isn't supported");
 				netdev_warn(priv->netdev,
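The check above no longer demands an all-ones mask over the option data, which is what "partial mask for tunnel options" in the cover letter refers to. As a plain illustration of the semantics (not driver code), a masked option-data comparison looks like:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative only: with a partial mask, a packet's option data matches the
 * key wherever the mask bits are set; bytes with a zero mask are wildcards.
 */
static bool opt_data_matches(const unsigned char *pkt, const unsigned char *key,
			     const unsigned char *mask, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if ((pkt[i] & mask[i]) != (key[i] & mask[i]))
			return false;
	return true;
}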
@@ -1863,6 +1866,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
 	struct flow_match_enc_opts enc_opts_match;
+	struct tunnel_match_enc_opts tun_enc_opts;
 	struct mlx5_rep_uplink_priv *uplink_priv;
 	struct mlx5e_rep_priv *uplink_rpriv;
 	struct tunnel_match_key tunnel_key;
@@ -1905,8 +1909,14 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
 		goto err_enc_opts;
 
 	if (!enc_opts_is_dont_care) {
+		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
+		memcpy(&tun_enc_opts.key, enc_opts_match.key,
+		       sizeof(*enc_opts_match.key));
+		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
+		       sizeof(*enc_opts_match.mask));
+
 		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
-				  enc_opts_match.key, &enc_opts_id);
+				  &tun_enc_opts, &enc_opts_id);
 		if (err)
 			goto err_enc_opts;
 	}
@@ -4707,7 +4717,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 
 int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
 {
-	const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
+	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
 	struct mlx5_rep_uplink_priv *uplink_priv;
 	struct mlx5e_rep_priv *priv;
 	struct mapping_ctx *mapping;
@@ -4802,7 +4812,7 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
 				 u32 tunnel_id)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct flow_dissector_key_enc_opts enc_opts = {};
+	struct tunnel_match_enc_opts enc_opts = {};
 	struct mlx5_rep_uplink_priv *uplink_priv;
 	struct mlx5e_rep_priv *uplink_rpriv;
 	struct metadata_dst *tun_dst;
@@ -4840,7 +4850,7 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
 		}
 	}
 
-	tun_dst = tun_rx_dst(enc_opts.len);
+	tun_dst = tun_rx_dst(enc_opts.key.len);
 	if (!tun_dst) {
 		WARN_ON_ONCE(true);
 		return false;
@@ -4854,9 +4864,11 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
 			   key32_to_tunnel_id(key.enc_key_id.keyid),
 			   TUNNEL_KEY);
 
-	if (enc_opts.len)
-		ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
-					enc_opts.len, enc_opts.dst_opt_type);
+	if (enc_opts.key.len)
+		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+					enc_opts.key.data,
+					enc_opts.key.len,
+					enc_opts.key.dst_opt_type);
 
 	skb_dst_set(skb, (struct dst_entry *)tun_dst);
 	dev = dev_get_by_index(&init_net, key.filter_ifindex);
@@ -65,6 +65,7 @@ struct mlx5_fpga_esp_xfrm;
 struct mlx5_fpga_ipsec_sa_ctx {
 	struct rhash_head hash;
 	struct mlx5_ifc_fpga_ipsec_sa hw_sa;
+	u32 sa_handle;
 	struct mlx5_core_dev *dev;
 	struct mlx5_fpga_esp_xfrm *fpga_xfrm;
 };
@@ -119,6 +120,8 @@ struct mlx5_fpga_ipsec {
 	 */
 	struct rb_root rules_rb;
 	struct mutex rules_rb_lock; /* rules lock */
+
+	struct ida halloc;
 };
 
 static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
@@ -602,7 +605,7 @@ static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
 				    const u32 *match_c,
 				    const u32 *match_v)
 {
-	u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
+	u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
 	bool ipv6_flow;
 
 	ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
@@ -666,7 +669,8 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
 				    struct mlx5_accel_esp_xfrm *accel_xfrm,
 				    const __be32 saddr[4],
 				    const __be32 daddr[4],
-				    const __be32 spi, bool is_ipv6)
+				    const __be32 spi, bool is_ipv6,
+				    u32 *sa_handle)
 {
 	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
 	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
@@ -704,6 +708,17 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
 		goto exists;
 	}
 
+	if (accel_xfrm->attrs.action & MLX5_ACCEL_ESP_ACTION_DECRYPT) {
+		err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);
+		if (err < 0) {
+			context = ERR_PTR(err);
+			goto exists;
+		}
+
+		sa_ctx->sa_handle = err;
+		if (sa_handle)
+			*sa_handle = sa_ctx->sa_handle;
+	}
 	/* This is unbounded fpga_xfrm, try to add to hash */
 	mutex_lock(&fipsec->sa_hash_lock);
 
@@ -744,7 +759,8 @@ delete_hash:
 				       rhash_sa));
 unlock_hash:
 	mutex_unlock(&fipsec->sa_hash_lock);
+	if (accel_xfrm->attrs.action & MLX5_ACCEL_ESP_ACTION_DECRYPT)
+		ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
 exists:
 	mutex_unlock(&fpga_xfrm->lock);
 	kfree(sa_ctx);
@@ -816,7 +832,7 @@ mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
 	/* create */
 	return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
 					     saddr, daddr,
-					     spi, is_ipv6);
+					     spi, is_ipv6, NULL);
 }
 
 static void
@@ -836,6 +852,10 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
 		return;
 	}
 
+	if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
+	    MLX5_ACCEL_ESP_ACTION_DECRYPT)
+		ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+
 	mutex_lock(&fipsec->sa_hash_lock);
 	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
 				       rhash_sa));
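With the hunks above, the RX SA handle is no longer allocated by the ethernet layer; the FPGA IPsec code owns an IDA and hands the allocated id back to the caller through the new sa_handle argument. A stripped-down sketch of that allocate/release lifecycle (names are placeholders, not the driver's functions):

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/types.h>

static DEFINE_IDA(example_sa_ida);

/* Allocate a small unique id (>= 1) for an RX SA and report it to the caller. */
static int example_alloc_sa_handle(u32 *sa_handle)
{
	int id = ida_simple_get(&example_sa_ida, 1, 0, GFP_KERNEL);

	if (id < 0)
		return id;

	*sa_handle = id;
	return 0;
}

/* Release the id when the SA context is destroyed. */
static void example_free_sa_handle(u32 sa_handle)
{
	ida_simple_remove(&example_sa_ida, sa_handle);
}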
@@ -1299,6 +1319,8 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
 		goto err_destroy_hash;
 	}
 
+	ida_init(&fdev->ipsec->halloc);
+
 	return 0;
 
 err_destroy_hash:
@@ -1331,6 +1353,7 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
 	if (!mlx5_fpga_is_ipsec_device(mdev))
 		return;
 
+	ida_destroy(&fdev->ipsec->halloc);
 	destroy_rules_rb(&fdev->ipsec->rules_rb);
 	rhashtable_destroy(&fdev->ipsec->sa_hash);
 
@@ -37,6 +37,7 @@
 #include "accel/ipsec.h"
 #include "fs_cmd.h"
 
+#ifdef CONFIG_MLX5_FPGA_IPSEC
 u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
 unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
 int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
@@ -46,7 +47,8 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
 				    struct mlx5_accel_esp_xfrm *accel_xfrm,
 				    const __be32 saddr[4],
 				    const __be32 daddr[4],
-				    const __be32 spi, bool is_ipv6);
+				    const __be32 spi, bool is_ipv6,
+				    u32 *sa_handle);
 void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
 
 int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
@@ -63,5 +65,17 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
 
 const struct mlx5_flow_cmds *
 mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
+#else
+static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+	return 0;
+}
 
-#endif /* __MLX5_FPGA_SADB_H__ */
+static inline const struct mlx5_flow_cmds *
+mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
+{
+	return mlx5_fs_cmd_get_default(type);
+}
+
+#endif /* CONFIG_MLX5_FPGA_IPSEC */
+#endif /* __MLX5_FPGA_IPSEC_H__ */
@@ -2359,7 +2359,7 @@ static struct mlx5_flow_root_namespace
 	struct mlx5_flow_root_namespace *root_ns;
 	struct mlx5_flow_namespace *ns;
 
-	if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
+	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
 	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
 		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
 
@@ -2943,7 +2943,8 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 			goto err;
 	}
 
-	if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
+	    MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
 		err = init_egress_root_ns(steering);
 		if (err)
 			goto err;
@@ -782,7 +782,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
 	}
 
 	mlx5_pci_vsc_init(dev);
-
+	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
 	return 0;
 
 err_clr_master:
@@ -1180,7 +1180,6 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 {
 	int err = 0;
 
-	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
 	mutex_lock(&dev->intf_state_mutex);
 	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 		mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -92,6 +92,18 @@ struct mlx5_accel_esp_xfrm_attrs {
 	union {
 		struct aes_gcm_keymat aes_gcm;
 	} keymat;
+
+	union {
+		__be32 a4;
+		__be32 a6[4];
+	} saddr;
+
+	union {
+		__be32 a4;
+		__be32 a6[4];
+	} daddr;
+
+	u8 is_ipv6;
 };
 
 struct mlx5_accel_esp_xfrm {