mlx5-net-next-2021-06-22
Merge tag 'mlx5-net-next-2021-06-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-net-next-2021-06-22

1) Various minor cleanups and fixes from the net-next branch
2) Optimize the mlx5 feature check on TX, and a fix to allow VXLAN with IPsec offloads
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit fe87797bf2
@@ -12,7 +12,6 @@ config MLX5_CORE
 	depends on MLXFW || !MLXFW
 	depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
 	depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
-	default n
 	help
 	  Core driver for low level functionality of the ConnectX-4 and
 	  Connect-IB cards by Mellanox Technologies.
@@ -36,7 +35,6 @@ config MLX5_CORE_EN
 	depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
 	select PAGE_POOL
 	select DIMLIB
-	default n
 	help
 	  Ethernet support in Mellanox Technologies ConnectX-4 NIC.
 
@@ -141,7 +139,6 @@ config MLX5_CORE_EN_DCB
 config MLX5_CORE_IPOIB
 	bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support"
 	depends on MLX5_CORE_EN
-	default n
 	help
 	  MLX5 IPoIB offloads & acceleration support.
 
@@ -149,7 +146,6 @@ config MLX5_FPGA_IPSEC
 	bool "Mellanox Technologies IPsec Innova support"
 	depends on MLX5_CORE
 	depends on MLX5_FPGA
-	default n
 	help
 	  Build IPsec support for the Innova family of network cards by Mellanox
 	  Technologies. Innova network cards are comprised of a ConnectX chip
@@ -163,7 +159,6 @@ config MLX5_IPSEC
 	depends on XFRM_OFFLOAD
 	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
 	select MLX5_ACCEL
-	default n
 	help
 	  Build IPsec support for the Connect-X family of network cards by Mellanox
 	  Technologies.
@@ -176,7 +171,6 @@ config MLX5_EN_IPSEC
 	depends on XFRM_OFFLOAD
 	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
 	depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
-	default n
 	help
 	  Build support for IPsec cryptography-offload acceleration in the NIC.
 	  Note: Support for hardware with this capability needs to be selected
@@ -189,7 +183,6 @@ config MLX5_FPGA_TLS
 	depends on MLX5_CORE_EN
 	depends on MLX5_FPGA
 	select MLX5_EN_TLS
-	default n
 	help
 	  Build TLS support for the Innova family of network cards by Mellanox
 	  Technologies. Innova network cards are comprised of a ConnectX chip
@@ -204,7 +197,6 @@ config MLX5_TLS
 	depends on MLX5_CORE_EN
 	select MLX5_ACCEL
 	select MLX5_EN_TLS
-	default n
 	help
 	  Build TLS support for the Connect-X family of network cards by Mellanox
 	  Technologies.
@@ -227,7 +219,6 @@ config MLX5_SW_STEERING
 config MLX5_SF
 	bool "Mellanox Technologies subfunction device support using auxiliary device"
 	depends on MLX5_CORE && MLX5_CORE_EN
-	default n
 	help
 	  Build support for subfuction device in the NIC. A Mellanox subfunction
 	  device can support RDMA, netdevice and vdpa device.
@@ -136,8 +136,6 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 				struct mlx5_wqe_eth_seg *eseg, u8 mode,
 				struct xfrm_offload *xo)
 {
-	struct mlx5e_swp_spec swp_spec = {};
-
 	/* Tunnel Mode:
 	 * SWP:      OutL3       InL3  InL4
 	 * Pkt: MAC  IP     ESP  IP    L4
@@ -146,23 +144,58 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 	 * SWP:      OutL3       InL4
 	 *                       InL3
 	 * Pkt: MAC  IP     ESP  L4
+	 *
+	 * Tunnel(VXLAN TCP/UDP) over Transport Mode
+	 * SWP:      OutL3                   InL3  InL4
+	 * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
 	 */
-	swp_spec.l3_proto = skb->protocol;
-	swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
-	if (swp_spec.is_tun) {
-		if (xo->proto == IPPROTO_IPV6) {
-			swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
-			swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
-		} else {
-			swp_spec.tun_l3_proto = htons(ETH_P_IP);
-			swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
-		}
-	} else {
-		swp_spec.tun_l3_proto = skb->protocol;
-		swp_spec.tun_l4_proto = xo->proto;
-	}
+
+	/* Shared settings */
+	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+	if (skb->protocol == htons(ETH_P_IPV6))
+		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+	/* Tunnel mode */
+	if (mode == XFRM_MODE_TUNNEL) {
+		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+		if (xo->proto == IPPROTO_IPV6)
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+		if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+		return;
+	}
 
-	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+	/* Transport mode */
+	if (mode != XFRM_MODE_TRANSPORT)
+		return;
+
+	if (!xo->inner_ipproto) {
+		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+		if (skb->protocol == htons(ETH_P_IPV6))
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+		if (xo->proto == IPPROTO_UDP)
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+		return;
+	}
+
+	/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+	switch (xo->inner_ipproto) {
+	case IPPROTO_UDP:
+		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+		fallthrough;
+	case IPPROTO_TCP:
+		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+		eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
+		if (skb->protocol == htons(ETH_P_IPV6))
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+		break;
+	default:
+		break;
+	}
+
+	return;
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
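Editor's note: the rewritten function programs the software parser (SWP) offsets directly into the WQE ethernet segment, and every skb offset is divided by 2 because the SWP fields are expressed in 2-byte units. Below is a rough worked example for the new VXLAN-over-IPsec-transport case. The header sizes are illustrative assumptions, not values taken from the patch (VXLAN also carries an inner MAC header, which the driver's packet diagram omits).

#include <stdio.h>

/* Illustrative layout for ESP transport mode carrying VXLAN:
 * MAC(14) | IP(20) | ESP(8) | UDP(8) | VXLAN(8) | inner MAC(14) | inner IP(20) | TCP
 * All sizes here are assumptions chosen for the example.
 */
int main(void)
{
	unsigned int mac = 14, ip = 20, esp = 8, udp = 8, vxlan = 8, imac = 14, iip = 20;

	unsigned int outer_l3 = mac;                                 /* 14 bytes  */
	unsigned int inner_l3 = mac + ip + esp + udp + vxlan + imac; /* 72 bytes  */
	unsigned int inner_l4 = inner_l3 + iip;                      /* 92 bytes  */

	/* SWP offsets are programmed in 2-byte units, hence the "/ 2" in the driver. */
	printf("swp_outer_l3=%u swp_inner_l3=%u swp_inner_l4=%u (2-byte words)\n",
	       outer_l3 / 2, inner_l3 / 2, inner_l4 / 2);
	return 0;
}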
@@ -93,18 +93,38 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
 			       struct mlx5_wqe_eth_seg *eseg);
 
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-					     netdev_features_t features)
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
 {
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct sec_path *sp = skb_sec_path(skb);
 
-	if (sp && sp->len) {
+	if (sp && sp->len && xo) {
 		struct xfrm_state *x = sp->xvec[0];
 
-		if (x && x->xso.offload_handle)
-			return true;
+		if (!x || !x->xso.offload_handle)
+			goto out_disable;
+
+		if (xo->inner_ipproto) {
+			/* Cannot support tunnel packet over IPsec tunnel mode
+			 * because we cannot offload three IP header csum
+			 */
+			if (x->props.mode == XFRM_MODE_TUNNEL)
+				goto out_disable;
+
+			/* Only support UDP or TCP L4 checksum */
+			if (xo->inner_ipproto != IPPROTO_UDP &&
+			    xo->inner_ipproto != IPPROTO_TCP)
+				goto out_disable;
+		}
+
+		return features;
 	}
-	return false;
+
+	/* Disable CSUM and GSO for software IPsec */
+out_disable:
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
 
 #else
@@ -120,8 +140,9 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 }
 
 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-					     netdev_features_t features) { return false; }
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
 #endif /* CONFIG_MLX5_EN_IPSEC */
 
 #endif /* __MLX5E_IPSEC_RXTX_H__ */
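Editor's note: this header change turns mlx5e_ipsec_feature_check() from a bool predicate, which forced the caller to keep a separate "else strip offloads" branch, into a helper that returns the (possibly reduced) feature mask itself. A minimal sketch of how a caller consumes that contract from .ndo_features_check follows; my_ipsec_feature_check() and my_features_check() are hypothetical names with the same return convention, not the mlx5 code (the real caller is in the en_main.c hunks below).

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper with the same contract as the reworked
 * mlx5e_ipsec_feature_check(): called for ESP traffic, it returns "features"
 * unchanged when the packet can be offloaded, otherwise it clears the checksum
 * and GSO bits so the stack falls back to software IPsec.
 */
static netdev_features_t my_ipsec_feature_check(struct sk_buff *skb,
						netdev_features_t features)
{
	if (!skb_sec_path(skb)) /* no xfrm state attached: cannot offload */
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	return features;
}

/* The .ndo_features_check implementation just propagates the helper's verdict;
 * no boolean branch is needed any more.
 */
static netdev_features_t my_features_check(struct sk_buff *skb,
					   struct net_device *netdev,
					   netdev_features_t features)
{
	return my_ipsec_feature_check(skb, features);
}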
@@ -4330,6 +4330,11 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 		/* Support Geneve offload for default UDP port */
 		if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
 			return features;
 #endif
+		break;
+#ifdef CONFIG_MLX5_EN_IPSEC
+	case IPPROTO_ESP:
+		return mlx5e_ipsec_feature_check(skb, features);
+#endif
 	}
 
@@ -4347,9 +4352,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 	features = vlan_features_check(skb, features);
 	features = vxlan_features_check(skb, features);
 
-	if (mlx5e_ipsec_feature_check(skb, netdev, features))
-		return features;
-
 	/* Validate if the tunneled packet is being offloaded by HW */
 	if (skb->encapsulation &&
 	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
@@ -712,7 +712,7 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
 	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
 	int err;
 
-	if (!param->affinity)
+	if (!cpumask_available(param->affinity))
 		return ERR_PTR(-EINVAL);
 
 	if (!eq)
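Editor's note: the EQ change replaces a plain pointer test with cpumask_available(). A cpumask_var_t is only a real pointer when CONFIG_CPUMASK_OFFSTACK is enabled; otherwise it is an array embedded in the surrounding structure, and "!param->affinity" can never be true. The sketch below paraphrases the typedefs from <linux/cpumask.h> to illustrate the distinction; it assumes param->affinity is a cpumask_var_t, which the use of cpumask_available() implies.

#include <linux/cpumask.h>

/* Simplified view of how cpumask_var_t is defined in <linux/cpumask.h>:
 *
 *   #ifdef CONFIG_CPUMASK_OFFSTACK
 *   typedef struct cpumask *cpumask_var_t;      // separately allocated
 *   #else
 *   typedef struct cpumask cpumask_var_t[1];    // embedded array, never NULL
 *   #endif
 *
 * "!mask" only works in the OFFSTACK case; cpumask_available() is the
 * config-independent "was this mask allocated?" test used in the hunk above.
 */
static bool eq_affinity_usable(cpumask_var_t affinity)
{
	return cpumask_available(affinity);
}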
|
@ -2969,8 +2969,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
|
|||
return err;
|
||||
|
||||
steering = kzalloc(sizeof(*steering), GFP_KERNEL);
|
||||
if (!steering)
|
||||
if (!steering) {
|
||||
err = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
steering->dev = dev;
|
||||
dev->priv.steering = steering;
|
||||
|
||||
|
|
|
@ -479,7 +479,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
|
|||
if (!mlx5_sf_max_functions(dev))
|
||||
return 0;
|
||||
if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
|
||||
mlx5_core_err(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
|
||||
mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1024,6 +1024,7 @@ struct xfrm_offload {
|
|||
#define CRYPTO_INVALID_PROTOCOL 128
|
||||
|
||||
__u8 proto;
|
||||
__u8 inner_ipproto;
|
||||
};
|
||||
|
||||
struct sec_path {
|
||||
|
|
|
@ -565,6 +565,42 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* For partial checksum offload, the outer header checksum is calculated
|
||||
* by software and the inner header checksum is calculated by hardware.
|
||||
* This requires hardware to know the inner packet type to calculate
|
||||
* the inner header checksum. Save inner ip protocol here to avoid
|
||||
* traversing the packet in the vendor's xmit code.
|
||||
* If the encap type is IPIP, just save skb->inner_ipproto. Otherwise,
|
||||
* get the ip protocol from the IP header.
|
||||
*/
|
||||
static void xfrm_get_inner_ipproto(struct sk_buff *skb)
|
||||
{
|
||||
struct xfrm_offload *xo = xfrm_offload(skb);
|
||||
const struct ethhdr *eth;
|
||||
|
||||
if (!xo)
|
||||
return;
|
||||
|
||||
if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
|
||||
xo->inner_ipproto = skb->inner_ipproto;
|
||||
return;
|
||||
}
|
||||
|
||||
if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
|
||||
return;
|
||||
|
||||
eth = (struct ethhdr *)skb_inner_mac_header(skb);
|
||||
|
||||
switch (ntohs(eth->h_proto)) {
|
||||
case ETH_P_IPV6:
|
||||
xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
|
||||
break;
|
||||
case ETH_P_IP:
|
||||
xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int xfrm_output(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
struct net *net = dev_net(skb_dst(skb)->dev);
|
||||
|
@@ -594,12 +630,15 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
 			kfree_skb(skb);
 			return -ENOMEM;
 		}
-		skb->encapsulation = 1;
 
 		sp->olen++;
 		sp->xvec[sp->len++] = x;
 		xfrm_state_hold(x);
 
+		if (skb->encapsulation)
+			xfrm_get_inner_ipproto(skb);
+		skb->encapsulation = 1;
+
 		if (skb_is_gso(skb)) {
 			if (skb->inner_protocol)
 				return xfrm_output_gso(net, sk, skb);
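Editor's note: xfrm_get_inner_ipproto() only helps if the encapsulating xmit path has already recorded what it placed inside the packet, and if the driver later reads the cached value instead of re-parsing headers. The condensed sketch below shows both sides of that contract using the existing skb helpers; the function names are hypothetical, and the real consumer is the mlx5 TX path shown earlier.

#include <linux/skbuff.h>
#include <net/xfrm.h>

/* Producer side: an encapsulating xmit path records the inner packet type.
 * IP-in-IP style tunnels store the IP protocol directly ...
 */
static void tunnel_xmit_mark_inner(struct sk_buff *skb, u8 ipproto)
{
	skb_set_inner_ipproto(skb, ipproto);    /* sets ENCAP_TYPE_IPPROTO */
}

/* ... while Ethernet-carrying tunnels (e.g. VXLAN) store the ethertype,
 * which xfrm_get_inner_ipproto() resolves by looking at the inner headers.
 */
static void ether_tunnel_xmit_mark_inner(struct sk_buff *skb, __be16 ethertype)
{
	skb_set_inner_protocol(skb, ethertype); /* sets ENCAP_TYPE_ETHER */
}

/* Consumer side: a driver's TX path reads the cached value instead of
 * walking the frame, as the mlx5 hunks above do via xo->inner_ipproto.
 */
static u8 driver_tx_inner_ipproto(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	return xo ? xo->inner_ipproto : 0;
}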