Merge tag 'mlx5-lm-parallel' of https://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux into v5.19/vfio/next

Improve mlx5 live migration driver

From Yishai:

This series improves the mlx5 live migration driver in a few aspects,
described below.

Refactor to enable running migration commands in parallel over the PF
command interface.

To achieve that, mlx5_core now exposes an API that lets the VF be
notified before the PF command interface goes down or comes back up
(e.g. on a PF reload during health recovery).

With the above functionality in place, mlx5 vfio no longer needs to
obtain the global PF lock when using the command interface; it can rely
on the above mechanism to stay in sync with the PF.

From the kernel driver's point of view, this enables migrating multiple
VFs in parallel over the PF command interface.
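
As an illustration of the pattern (a minimal sketch, not the exact driver
code: the context struct and helper below are made up, and the
MLX5_PF_NOTIFY_* event names and registration helper are assumed to match
what this series exposes from mlx5_core), a VF driver can track the PF
command interface state from a blocking notifier and gate its command
submissions on that state instead of holding a global PF lock:

#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/mlx5/driver.h>

/* Illustrative VF-side context; not the actual mlx5 vfio structures. */
struct vf_mig_ctx {
	struct mlx5_core_dev *mdev;
	struct notifier_block nb;
	struct mutex state_mutex;	/* serializes state vs. notifications */
	bool mdev_detach;		/* true while the PF command interface is down */
};

static int vf_mig_pf_event(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	struct vf_mig_ctx *ctx = container_of(nb, struct vf_mig_ctx, nb);

	mutex_lock(&ctx->state_mutex);
	/* Event names assumed from this series' mlx5_core changes. */
	if (event == MLX5_PF_NOTIFY_DISABLE_VF)
		ctx->mdev_detach = true;
	else if (event == MLX5_PF_NOTIFY_ENABLE_VF)
		ctx->mdev_detach = false;
	mutex_unlock(&ctx->state_mutex);
	return 0;
}

/* Registration at open time (vf_id identifies this VF on the PF):
 *	ctx->nb.notifier_call = vf_mig_pf_event;
 *	mlx5_sriov_blocking_notifier_register(ctx->mdev, vf_id, &ctx->nb);
 * Migration commands then bail out early while ctx->mdev_detach is set,
 * so no global PF lock is needed around the command interface.
 */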

In addition, the SAVE state command was moved to the PF's asynchronous
command mode. This allows returning to user space as soon as the command
has been issued successfully, and improves latency by letting things run
in parallel.
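
To sketch the asynchronous flow (illustrative only: the wrapper struct is
made up, and a real SAVE also programs the destination mkey and buffer
size, which are omitted here), the command is posted through mlx5's async
command API and completion is signalled from a callback, so the caller
does not block on the firmware:

#include <linux/completion.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative wrapper; the real driver tracks more state per SAVE. */
struct save_work {
	struct mlx5_async_work cb_work;
	struct completion done;
	int status;
	u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)];
};

static void save_callback(int status, struct mlx5_async_work *context)
{
	struct save_work *w = container_of(context, struct save_work, cb_work);

	w->status = status;	/* firmware completion status */
	complete(&w->done);	/* wake whoever consumes the saved state */
}

/* async_ctx is set up once with mlx5_cmd_init_async_ctx(mdev, &async_ctx). */
static int issue_save_async(struct mlx5_async_ctx *async_ctx, u16 vhca_id,
			    struct save_work *w)
{
	u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};

	MLX5_SET(save_vhca_state_in, in, opcode, MLX5_CMD_OP_SAVE_VHCA_STATE);
	MLX5_SET(save_vhca_state_in, in, vhca_id, vhca_id);

	/* Returns once the command is posted; save_callback() runs later. */
	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in), w->out, sizeof(w->out),
				save_callback, &w->cb_work);
}

The caller only waits on w->done when user space actually reads the saved
state, which is what lets SAVE commands from multiple VFs overlap on the
PF command interface.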

Alex, as this series touches mlx5_core, we may need to send it to VFIO as
a pull request to avoid conflicts before acceptance.

Link: https://lore.kernel.org/all/20220510090206.90374-1-yishaih@nvidia.com
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Alex Williamson 2022-05-11 13:08:49 -06:00
commit 920df8d6ef
58 changed files with 718 additions and 5493 deletions


@@ -2613,7 +2613,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, create_counters);
 	SET_DEVICE_OP(dev_ops, create_cq);
 	SET_DEVICE_OP(dev_ops, create_flow);
-	SET_DEVICE_OP(dev_ops, create_flow_action_esp);
 	SET_DEVICE_OP(dev_ops, create_qp);
 	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
 	SET_DEVICE_OP(dev_ops, create_srq);
@@ -2676,7 +2675,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, modify_ah);
 	SET_DEVICE_OP(dev_ops, modify_cq);
 	SET_DEVICE_OP(dev_ops, modify_device);
-	SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
 	SET_DEVICE_OP(dev_ops, modify_hw_stat);
 	SET_DEVICE_OP(dev_ops, modify_port);
 	SET_DEVICE_OP(dev_ops, modify_qp);


@@ -46,385 +46,6 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
return action->device->ops.destroy_flow_action(action); return action->device->ops.destroy_flow_action(action);
} }
static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
u32 flags, bool is_modify)
{
u64 verbs_flags = flags;
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN))
verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED;
if (is_modify && uverbs_attr_is_valid(attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS))
verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS;
return verbs_flags;
};
static int validate_flow_action_esp_keymat_aes_gcm(struct ib_flow_action_attrs_esp_keymats *keymat)
{
struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm =
&keymat->keymat.aes_gcm;
if (aes_gcm->iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
return -EOPNOTSUPP;
if (aes_gcm->key_len != 32 &&
aes_gcm->key_len != 24 &&
aes_gcm->key_len != 16)
return -EINVAL;
if (aes_gcm->icv_len != 16 &&
aes_gcm->icv_len != 8 &&
aes_gcm->icv_len != 12)
return -EINVAL;
return 0;
}
static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_attrs_esp_keymats *keymat) = {
[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm,
};
static int flow_action_esp_replay_none(struct ib_flow_action_attrs_esp_replays *replay,
bool is_modify)
{
/* This is used in order to modify an esp flow action with an enabled
* replay protection to a disabled one. This is only supported via
* modify, as in create verb we can simply drop the REPLAY attribute and
* achieve the same thing.
*/
return is_modify ? 0 : -EINVAL;
}
static int flow_action_esp_replay_def_ok(struct ib_flow_action_attrs_esp_replays *replay,
bool is_modify)
{
/* Some replay protections could always be enabled without validating
* anything.
*/
return 0;
}
static int (* const flow_action_esp_replay_validate[])(struct ib_flow_action_attrs_esp_replays *replay,
bool is_modify) = {
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = flow_action_esp_replay_none,
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = flow_action_esp_replay_def_ok,
};
static int parse_esp_ip(enum ib_flow_spec_type proto,
const void __user *val_ptr,
size_t len, union ib_flow_spec *out)
{
int ret;
const struct ib_uverbs_flow_ipv4_filter ipv4 = {
.src_ip = cpu_to_be32(0xffffffffUL),
.dst_ip = cpu_to_be32(0xffffffffUL),
.proto = 0xff,
.tos = 0xff,
.ttl = 0xff,
.flags = 0xff,
};
const struct ib_uverbs_flow_ipv6_filter ipv6 = {
.src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.flow_label = cpu_to_be32(0xffffffffUL),
.next_hdr = 0xff,
.traffic_class = 0xff,
.hop_limit = 0xff,
};
union {
struct ib_uverbs_flow_ipv4_filter ipv4;
struct ib_uverbs_flow_ipv6_filter ipv6;
} user_val = {};
const void *user_pmask;
size_t val_len;
/* If the flow IPv4/IPv6 flow specifications are extended, the mask
* should be changed as well.
*/
BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) +
sizeof(ipv4.flags) != sizeof(ipv4));
BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) +
sizeof(ipv6.reserved) != sizeof(ipv6));
switch (proto) {
case IB_FLOW_SPEC_IPV4:
if (len > sizeof(user_val.ipv4) &&
!ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4),
len - sizeof(user_val.ipv4)))
return -EOPNOTSUPP;
val_len = min_t(size_t, len, sizeof(user_val.ipv4));
ret = copy_from_user(&user_val.ipv4, val_ptr,
val_len);
if (ret)
return -EFAULT;
user_pmask = &ipv4;
break;
case IB_FLOW_SPEC_IPV6:
if (len > sizeof(user_val.ipv6) &&
!ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6),
len - sizeof(user_val.ipv6)))
return -EOPNOTSUPP;
val_len = min_t(size_t, len, sizeof(user_val.ipv6));
ret = copy_from_user(&user_val.ipv6, val_ptr,
val_len);
if (ret)
return -EFAULT;
user_pmask = &ipv6;
break;
default:
return -EOPNOTSUPP;
}
return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask,
&user_val,
val_len, out);
}
static int flow_action_esp_get_encap(struct ib_flow_spec_list *out,
struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_flow_action_esp_encap uverbs_encap;
int ret;
ret = uverbs_copy_from(&uverbs_encap, attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP);
if (ret)
return ret;
/* We currently support only one encap */
if (uverbs_encap.next_ptr)
return -EOPNOTSUPP;
if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 &&
uverbs_encap.type != IB_FLOW_SPEC_IPV6)
return -EOPNOTSUPP;
return parse_esp_ip(uverbs_encap.type,
u64_to_user_ptr(uverbs_encap.val_ptr),
uverbs_encap.len,
&out->spec);
}
struct ib_flow_action_esp_attr {
struct ib_flow_action_attrs_esp hdr;
struct ib_flow_action_attrs_esp_keymats keymat;
struct ib_flow_action_attrs_esp_replays replay;
/* We currently support only one spec */
struct ib_flow_spec_list encap;
};
#define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW
static int parse_flow_action_esp(struct ib_device *ib_dev,
struct uverbs_attr_bundle *attrs,
struct ib_flow_action_esp_attr *esp_attr,
bool is_modify)
{
struct ib_uverbs_flow_action_esp uverbs_esp = {};
int ret;
/* Optional param, if it doesn't exist, we get -ENOENT and skip it */
ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_ESN);
if (IS_UVERBS_COPY_ERR(ret))
return ret;
/* This can be called from FLOW_ACTION_ESP_MODIFY where
* UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional
*/
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) {
ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS);
if (ret)
return ret;
if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1))
return -EOPNOTSUPP;
esp_attr->hdr.spi = uverbs_esp.spi;
esp_attr->hdr.seq = uverbs_esp.seq;
esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad;
esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts;
}
esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags,
is_modify);
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) {
esp_attr->keymat.protocol =
uverbs_attr_get_enum_id(attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
ret = uverbs_copy_from_or_zero(&esp_attr->keymat.keymat,
attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
if (ret)
return ret;
ret = flow_action_esp_keymat_validate[esp_attr->keymat.protocol](&esp_attr->keymat);
if (ret)
return ret;
esp_attr->hdr.keymat = &esp_attr->keymat;
}
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) {
esp_attr->replay.protocol =
uverbs_attr_get_enum_id(attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
ret = uverbs_copy_from_or_zero(&esp_attr->replay.replay,
attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
if (ret)
return ret;
ret = flow_action_esp_replay_validate[esp_attr->replay.protocol](&esp_attr->replay,
is_modify);
if (ret)
return ret;
esp_attr->hdr.replay = &esp_attr->replay;
}
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) {
ret = flow_action_esp_get_encap(&esp_attr->encap, attrs);
if (ret)
return ret;
esp_attr->hdr.encap = &esp_attr->encap;
}
return 0;
}
static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE);
struct ib_device *ib_dev = attrs->context->device;
int ret;
struct ib_flow_action *action;
struct ib_flow_action_esp_attr esp_attr = {};
if (!ib_dev->ops.create_flow_action_esp)
return -EOPNOTSUPP;
ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
if (ret)
return ret;
/* No need to check as this attribute is marked as MANDATORY */
action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
attrs);
if (IS_ERR(action))
return PTR_ERR(action);
uverbs_flow_action_fill_action(action, uobj, ib_dev,
IB_FLOW_ACTION_ESP);
return 0;
}
static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE);
struct ib_flow_action *action = uobj->object;
int ret;
struct ib_flow_action_esp_attr esp_attr = {};
if (!action->device->ops.modify_flow_action_esp)
return -EOPNOTSUPP;
ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true);
if (ret)
return ret;
if (action->type != IB_FLOW_ACTION_ESP)
return -EINVAL;
return action->device->ops.modify_flow_action_esp(action,
&esp_attr.hdr,
attrs);
}
static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
UVERBS_ATTR_STRUCT(
struct ib_uverbs_flow_action_esp_keymat_aes_gcm,
aes_key),
},
};
static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
UVERBS_ATTR_NO_DATA(),
},
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp,
size),
},
};
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_ACCESS_NEW,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
hard_limit_pkts),
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
UVERBS_ATTR_TYPE(__u32),
UA_OPTIONAL),
UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
uverbs_flow_action_esp_keymat,
UA_MANDATORY),
UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
uverbs_flow_action_esp_replay,
UA_OPTIONAL),
UVERBS_ATTR_PTR_IN(
UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_ACCESS_WRITE,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
hard_limit_pkts),
UA_OPTIONAL),
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
UVERBS_ATTR_TYPE(__u32),
UA_OPTIONAL),
UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
uverbs_flow_action_esp_keymat,
UA_OPTIONAL),
UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
uverbs_flow_action_esp_replay,
UA_OPTIONAL),
UVERBS_ATTR_PTR_IN(
UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY( DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_FLOW_ACTION_DESTROY, UVERBS_METHOD_FLOW_ACTION_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
@@ -435,9 +56,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
DECLARE_UVERBS_NAMED_OBJECT( DECLARE_UVERBS_NAMED_OBJECT(
UVERBS_OBJECT_FLOW_ACTION, UVERBS_OBJECT_FLOW_ACTION,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action), UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
&UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE), &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY));
&UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
&UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
const struct uapi_definition uverbs_def_obj_flow_action[] = { const struct uapi_definition uverbs_def_obj_flow_action[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED( UAPI_DEF_CHAIN_OBJ_TREE_NAMED(


@@ -15,7 +15,6 @@
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h> #include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h> #include <linux/mlx5/eswitch.h>
#include <net/inet_ecn.h> #include <net/inet_ecn.h>
#include "mlx5_ib.h" #include "mlx5_ib.h"
@@ -148,16 +147,6 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
{ {
switch (maction->ib_action.type) { switch (maction->ib_action.type) {
case IB_FLOW_ACTION_ESP:
if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
return -EINVAL;
/* Currently only AES_GCM keymat is supported by the driver */
action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
action->action |= is_egress ?
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
return 0;
case IB_FLOW_ACTION_UNSPECIFIED: case IB_FLOW_ACTION_UNSPECIFIED:
if (maction->flow_action_raw.sub_type == if (maction->flow_action_raw.sub_type ==
MLX5_IB_FLOW_ACTION_MODIFY_HEADER) { MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
@@ -368,14 +357,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev,
ib_spec->type & IB_FLOW_SPEC_INNER); ib_spec->type & IB_FLOW_SPEC_INNER);
break; break;
case IB_FLOW_SPEC_ESP: case IB_FLOW_SPEC_ESP:
if (ib_spec->esp.mask.seq)
return -EOPNOTSUPP; return -EOPNOTSUPP;
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
ntohl(ib_spec->esp.mask.spi));
MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
ntohl(ib_spec->esp.val.spi));
break;
case IB_FLOW_SPEC_TCP: case IB_FLOW_SPEC_TCP:
if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
LAST_TCP_UDP_FIELD)) LAST_TCP_UDP_FIELD))
@@ -587,47 +569,6 @@ static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
return false; return false;
} }
enum valid_spec {
VALID_SPEC_INVALID,
VALID_SPEC_VALID,
VALID_SPEC_NA,
};
static enum valid_spec
is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
const struct mlx5_flow_spec *spec,
const struct mlx5_flow_act *flow_act,
bool egress)
{
const u32 *match_c = spec->match_criteria;
bool is_crypto =
(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
/*
* Currently only crypto is supported in egress, when regular egress
* rules would be supported, always return VALID_SPEC_NA.
*/
if (!is_crypto)
return VALID_SPEC_NA;
return is_crypto && is_ipsec &&
(!egress || (!is_drop &&
!(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
VALID_SPEC_VALID : VALID_SPEC_INVALID;
}
static bool is_valid_spec(struct mlx5_core_dev *mdev,
const struct mlx5_flow_spec *spec,
const struct mlx5_flow_act *flow_act,
bool egress)
{
/* We curretly only support ipsec egress flow */
return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
}
static bool is_valid_ethertype(struct mlx5_core_dev *mdev, static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
const struct ib_flow_attr *flow_attr, const struct ib_flow_attr *flow_attr,
bool check_inner) bool check_inner)
@@ -1154,8 +1095,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
if (is_egress && if (is_egress) {
!is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
err = -EINVAL; err = -EINVAL;
goto free; goto free;
} }
@@ -1740,149 +1680,6 @@ unlock:
return ERR_PTR(err); return ERR_PTR(err);
} }
static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
{
u32 flags = 0;
if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
return flags;
}
#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \
MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
static struct ib_flow_action *
mlx5_ib_create_flow_action_esp(struct ib_device *device,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_dev *mdev = to_mdev(device);
struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
struct mlx5_ib_flow_action *action;
u64 action_flags;
u64 flags;
int err = 0;
err = uverbs_get_flags64(
&action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
if (err)
return ERR_PTR(err);
flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
/* We current only support a subset of the standard features. Only a
* keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
* (with overlap). Full offload mode isn't supported.
*/
if (!attr->keymat || attr->replay || attr->encap ||
attr->spi || attr->seq || attr->tfc_pad ||
attr->hard_limit_pkts ||
(attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
return ERR_PTR(-EOPNOTSUPP);
if (attr->keymat->protocol !=
IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
return ERR_PTR(-EOPNOTSUPP);
aes_gcm = &attr->keymat->keymat.aes_gcm;
if (aes_gcm->icv_len != 16 ||
aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
return ERR_PTR(-EOPNOTSUPP);
action = kmalloc(sizeof(*action), GFP_KERNEL);
if (!action)
return ERR_PTR(-ENOMEM);
action->esp_aes_gcm.ib_flags = attr->flags;
memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
sizeof(accel_attrs.keymat.aes_gcm.aes_key));
accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
sizeof(accel_attrs.keymat.aes_gcm.salt));
memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
accel_attrs.esn = attr->esn;
if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
action->esp_aes_gcm.ctx =
mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
if (IS_ERR(action->esp_aes_gcm.ctx)) {
err = PTR_ERR(action->esp_aes_gcm.ctx);
goto err_parse;
}
action->esp_aes_gcm.ib_flags = attr->flags;
return &action->ib_action;
err_parse:
kfree(action);
return ERR_PTR(err);
}
static int
mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_flow_action *maction = to_mflow_act(action);
struct mlx5_accel_esp_xfrm_attrs accel_attrs;
int err = 0;
if (attr->keymat || attr->replay || attr->encap ||
attr->spi || attr->seq || attr->tfc_pad ||
attr->hard_limit_pkts ||
(attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
return -EOPNOTSUPP;
/* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
* be modified.
*/
if (!(maction->esp_aes_gcm.ib_flags &
IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
return -EINVAL;
memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
sizeof(accel_attrs));
accel_attrs.esn = attr->esn;
if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
else
accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
&accel_attrs);
if (err)
return err;
maction->esp_aes_gcm.ib_flags &=
~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
maction->esp_aes_gcm.ib_flags |=
attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
return 0;
}
static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction) static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{ {
switch (maction->flow_action_raw.sub_type) { switch (maction->flow_action_raw.sub_type) {
@@ -1906,13 +1703,6 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
struct mlx5_ib_flow_action *maction = to_mflow_act(action); struct mlx5_ib_flow_action *maction = to_mflow_act(action);
switch (action->type) { switch (action->type) {
case IB_FLOW_ACTION_ESP:
/*
* We only support aes_gcm by now, so we implicitly know this is
* the underline crypto.
*/
mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
break;
case IB_FLOW_ACTION_UNSPECIFIED: case IB_FLOW_ACTION_UNSPECIFIED:
destroy_flow_action_raw(maction); destroy_flow_action_raw(maction);
break; break;
@@ -2709,11 +2499,6 @@ static const struct ib_device_ops flow_ops = {
.destroy_flow_action = mlx5_ib_destroy_flow_action, .destroy_flow_action = mlx5_ib_destroy_flow_action,
}; };
static const struct ib_device_ops flow_ipsec_ops = {
.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
};
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{ {
dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
@@ -2724,9 +2509,5 @@ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->flow_db->lock); mutex_init(&dev->flow_db->lock);
ib_set_device_ops(&dev->ib_dev, &flow_ops); ib_set_device_ops(&dev->ib_dev, &flow_ops);
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
MLX5_ACCEL_IPSEC_CAP_DEVICE)
ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops);
return 0; return 0;
} }


@@ -41,7 +41,6 @@
 #include "wr.h"
 #include "restrack.h"
 #include "counters.h"
-#include <linux/mlx5/accel.h>
 #include <rdma/uverbs_std_types.h>
 #include <rdma/uverbs_ioctl.h>
 #include <rdma/mlx5_user_ioctl_verbs.h>
@@ -906,10 +905,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 				MLX5_RX_HASH_SRC_PORT_UDP |
 				MLX5_RX_HASH_DST_PORT_UDP |
 				MLX5_RX_HASH_INNER;
-			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
-			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
-				resp.rss_caps.rx_hash_fields_mask |=
-					MLX5_RX_HASH_IPSEC_SPI;
 			resp.response_length += sizeof(resp.rss_caps);
 		}
 	} else {
@@ -1791,23 +1786,6 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
 	resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
 				MLX5_CAP_GEN(dev->mdev,
 					     num_of_uars_per_page) : 1;
-
-	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
-	    MLX5_ACCEL_IPSEC_CAP_DEVICE) {
-		if (mlx5_get_flow_namespace(dev->mdev,
-					    MLX5_FLOW_NAMESPACE_EGRESS))
-			resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
-		if (mlx5_accel_ipsec_device_caps(dev->mdev) &
-		    MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
-			resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
-		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
-			resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
-		if (mlx5_accel_ipsec_device_caps(dev->mdev) &
-		    MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
-			resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
-		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
-	}
-
 	resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
 				bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
 	resp->num_ports = dev->num_ports;
@@ -3604,13 +3582,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
 			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
 			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
 
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
-	mlx5_ib_flow_action,
-	UVERBS_OBJECT_FLOW_ACTION,
-	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
-	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
-			     enum mlx5_ib_uapi_flow_action_flags));
-
 ADD_UVERBS_ATTRIBUTES_SIMPLE(
 	mlx5_ib_query_context,
 	UVERBS_OBJECT_DEVICE,
@@ -3628,8 +3599,6 @@ static const struct uapi_definition mlx5_ib_defs[] = {
 	UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
 	UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
 
-	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
-				&mlx5_ib_flow_action),
 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
 				      UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),


@@ -16,13 +16,9 @@ config MLX5_CORE
 	  Core driver for low level functionality of the ConnectX-4 and
 	  Connect-IB cards by Mellanox Technologies.
 
-config MLX5_ACCEL
-	bool
-
 config MLX5_FPGA
 	bool "Mellanox Technologies Innova support"
 	depends on MLX5_CORE
-	select MLX5_ACCEL
 	help
 	  Build support for the Innova family of network cards by Mellanox
 	  Technologies. Innova network cards are comprised of a ConnectX chip
@@ -143,71 +139,21 @@ config MLX5_CORE_IPOIB
 	help
 	  MLX5 IPoIB offloads & acceleration support.
 
-config MLX5_FPGA_IPSEC
-	bool "Mellanox Technologies IPsec Innova support"
-	depends on MLX5_CORE
-	depends on MLX5_FPGA
-	help
-	  Build IPsec support for the Innova family of network cards by Mellanox
-	  Technologies. Innova network cards are comprised of a ConnectX chip
-	  and an FPGA chip on one board. If you select this option, the
-	  mlx5_core driver will include the Innova FPGA core and allow building
-	  sandbox-specific client drivers.
-
-config MLX5_IPSEC
+config MLX5_EN_IPSEC
 	bool "Mellanox Technologies IPsec Connect-X support"
 	depends on MLX5_CORE_EN
 	depends on XFRM_OFFLOAD
 	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
-	select MLX5_ACCEL
-	help
-	  Build IPsec support for the Connect-X family of network cards by Mellanox
-	  Technologies.
-	  Note: If you select this option, the mlx5_core driver will include
-	  IPsec support for the Connect-X family.
-
-config MLX5_EN_IPSEC
-	bool "IPSec XFRM cryptography-offload acceleration"
-	depends on MLX5_CORE_EN
-	depends on XFRM_OFFLOAD
-	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
-	depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
 	help
 	  Build support for IPsec cryptography-offload acceleration in the NIC.
-	  Note: Support for hardware with this capability needs to be selected
-	  for this option to become available.
 
-config MLX5_FPGA_TLS
-	bool "Mellanox Technologies TLS Innova support"
-	depends on TLS_DEVICE
-	depends on TLS=y || MLX5_CORE=m
-	depends on MLX5_CORE_EN
-	depends on MLX5_FPGA
-	select MLX5_EN_TLS
-	help
-	  Build TLS support for the Innova family of network cards by Mellanox
-	  Technologies. Innova network cards are comprised of a ConnectX chip
-	  and an FPGA chip on one board. If you select this option, the
-	  mlx5_core driver will include the Innova FPGA core and allow building
-	  sandbox-specific client drivers.
-
-config MLX5_TLS
+config MLX5_EN_TLS
 	bool "Mellanox Technologies TLS Connect-X support"
 	depends on TLS_DEVICE
 	depends on TLS=y || MLX5_CORE=m
 	depends on MLX5_CORE_EN
-	select MLX5_ACCEL
-	select MLX5_EN_TLS
-	help
-	  Build TLS support for the Connect-X family of network cards by Mellanox
-	  Technologies.
-
-config MLX5_EN_TLS
-	bool
 	help
 	  Build support for TLS cryptography-offload acceleration in the NIC.
-	  Note: Support for hardware with this capability needs to be selected
-	  for this option to become available.
 
 config MLX5_SW_STEERING
 	bool "Mellanox Technologies software-managed steering"


@@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
 		en_selftest.o en/port.o en/monitor_stats.o en/health.o \
 		en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
 		en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
-		en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o
+		en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
 
 #
 # Netdev extra
@@ -88,17 +88,13 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
 #
 # Accelerations & FPGA
 #
-mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
-mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
-mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
-
 mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
 
 mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
-				     en_accel/ipsec_stats.o en_accel/ipsec_fs.o
+				     en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
+				     en_accel/ipsec_offload.o
 
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
 				   en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
 				   en_accel/ktls_tx.o en_accel/ktls_rx.o


@@ -1,36 +0,0 @@
#ifndef __MLX5E_ACCEL_H__
#define __MLX5E_ACCEL_H__
#ifdef CONFIG_MLX5_ACCEL
#include <linux/skbuff.h>
#include <linux/netdevice.h>
static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
{
__be16 *ethtype;
if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
return false;
ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
return false;
return true;
}
static inline void remove_metadata_hdr(struct sk_buff *skb)
{
struct ethhdr *old_eth;
struct ethhdr *new_eth;
/* Remove the metadata from the buffer */
old_eth = (struct ethhdr *)skb->data;
new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
memmove(new_eth, old_eth, 2 * ETH_ALEN);
/* Ethertype is already in its new place */
skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
}
#endif /* CONFIG_MLX5_ACCEL */
#endif /* __MLX5E_EN_ACCEL_H__ */


@@ -1,179 +0,0 @@
/*
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx5/device.h>
#include "accel/ipsec.h"
#include "mlx5_core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec_offload.h"
void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops;
int err = 0;
ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
mlx5_ipsec_offload_ops(mdev) :
mlx5_fpga_ipsec_ops(mdev);
if (!ipsec_ops || !ipsec_ops->init) {
mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
return;
}
err = ipsec_ops->init(mdev);
if (err) {
mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
return;
}
mdev->ipsec_ops = ipsec_ops;
}
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->cleanup)
return;
ipsec_ops->cleanup(mdev);
}
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->device_caps)
return 0;
return ipsec_ops->device_caps(mdev);
}
EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->counters_count)
return -EOPNOTSUPP;
return ipsec_ops->counters_count(mdev);
}
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->counters_read)
return -EOPNOTSUPP;
return ipsec_ops->counters_read(mdev, counters, count);
}
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
__be32 saddr[4] = {}, daddr[4] = {};
if (!ipsec_ops || !ipsec_ops->create_hw_context)
return ERR_PTR(-EOPNOTSUPP);
if (!xfrm->attrs.is_ipv6) {
saddr[3] = xfrm->attrs.saddr.a4;
daddr[3] = xfrm->attrs.daddr.a4;
} else {
memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
}
return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->free_hw_context)
return;
ipsec_ops->free_hw_context(context);
}
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
struct mlx5_accel_esp_xfrm *xfrm;
if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
return ERR_PTR(-EOPNOTSUPP);
xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
if (IS_ERR(xfrm))
return xfrm;
xfrm->mdev = mdev;
return xfrm;
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
return;
ipsec_ops->esp_destroy_xfrm(xfrm);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
return -EOPNOTSUPP;
return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);


@@ -1,96 +0,0 @@
/*
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_ACCEL_IPSEC_H__
#define __MLX5_ACCEL_IPSEC_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/accel.h>
#ifdef CONFIG_MLX5_ACCEL
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_CAP_DEVICE)
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count);
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle);
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
struct mlx5_accel_ipsec_ops {
u32 (*device_caps)(struct mlx5_core_dev *mdev);
unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
void* (*create_hw_context)(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4], const __be32 daddr[4],
const __be32 spi, bool is_ipv6, u32 *sa_handle);
void (*free_hw_context)(void *context);
int (*init)(struct mlx5_core_dev *mdev);
void (*cleanup)(struct mlx5_core_dev *mdev);
struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
};
#else
#define MLX5_IPSEC_DEV(mdev) false
static inline void *
mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle)
{
return NULL;
}
static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
#endif /* CONFIG_MLX5_ACCEL */
#endif /* __MLX5_ACCEL_IPSEC_H__ */


@@ -1,38 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5_IPSEC_OFFLOAD_H__
#define __MLX5_IPSEC_OFFLOAD_H__
#include <linux/mlx5/driver.h>
#include "accel/ipsec.h"
#ifdef CONFIG_MLX5_IPSEC
const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
{
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return false;
return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
MLX5_CAP_ETH(mdev, insert_trailer);
}
#else
static inline const struct mlx5_accel_ipsec_ops *
mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
{
return false;
}
#endif /* CONFIG_MLX5_IPSEC */
#endif /* __MLX5_IPSEC_OFFLOAD_H__ */


@@ -1,125 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx5/device.h>
#include "accel/tls.h"
#include "mlx5_core.h"
#include "lib/mlx5.h"
#ifdef CONFIG_MLX5_FPGA_TLS
#include "fpga/tls.h"
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx)
{
return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
start_offload_tcp_sn, p_swid,
direction_sx);
}
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx)
{
mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
}
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
u32 seq, __be64 rcd_sn)
{
return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_is_tls_device(mdev) ||
mlx5_accel_is_ktls_device(mdev);
}
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_tls_device_caps(mdev);
}
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_tls_init(mdev);
}
void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_tls_cleanup(mdev);
}
#endif
#ifdef CONFIG_MLX5_TLS
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id)
{
u32 sz_bytes;
void *key;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info =
(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
key = info->key;
sz_bytes = sizeof(info->key);
break;
}
case TLS_CIPHER_AES_GCM_256: {
struct tls12_crypto_info_aes_gcm_256 *info =
(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
key = info->key;
sz_bytes = sizeof(info->key);
break;
}
default:
return -EINVAL;
}
return mlx5_create_encryption_key(mdev, key, sz_bytes,
MLX5_ACCEL_OBJ_TLS_KEY,
p_key_id);
}
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
{
mlx5_destroy_encryption_key(mdev, key_id);
}
#endif


@@ -1,156 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_ACCEL_TLS_H__
#define __MLX5_ACCEL_TLS_H__
#include <linux/mlx5/driver.h>
#include <linux/tls.h>
#ifdef CONFIG_MLX5_TLS
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id);
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, tls_tx);
}
static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, tls_rx);
}
static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
{
if (!mlx5_accel_is_ktls_tx(mdev) &&
!mlx5_accel_is_ktls_rx(mdev))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
}
static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info)
{
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
break;
}
return false;
}
#else
static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
{ return false; }
static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
{ return false; }
static inline int
mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id) { return -ENOTSUPP; }
static inline void
mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
static inline bool
mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
static inline bool
mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info) { return false; }
#endif
enum {
MLX5_ACCEL_TLS_TX = BIT(0),
MLX5_ACCEL_TLS_RX = BIT(1),
MLX5_ACCEL_TLS_V12 = BIT(2),
MLX5_ACCEL_TLS_V13 = BIT(3),
MLX5_ACCEL_TLS_LRO = BIT(4),
MLX5_ACCEL_TLS_IPV6 = BIT(5),
MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
};
struct mlx5_ifc_tls_flow_bits {
u8 src_port[0x10];
u8 dst_port[0x10];
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
u8 ipv6[0x1];
u8 direction_sx[0x1];
u8 reserved_at_2[0x1e];
};
#ifdef CONFIG_MLX5_FPGA_TLS
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx);
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx);
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
u32 seq, __be64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
#else
static inline int
mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx) { return -ENOTSUPP; }
static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx) { }
static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
u32 seq, __be64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return mlx5_accel_is_ktls_device(mdev);
}
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
#endif
#endif /* __MLX5_ACCEL_TLS_H__ */


@@ -354,7 +354,6 @@ enum {
 	MLX5E_RQ_STATE_AM,
 	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
-	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
 	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
 	MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
 };


@@ -5,8 +5,7 @@
 #include "en/txrx.h"
 #include "en/port.h"
 #include "en_accel/en_accel.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec_offload.h"
 
 static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
 			    struct mlx5e_xsk_param *xsk)
@@ -207,7 +206,7 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
 	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
 	u16 stop_room;
 
-	stop_room = mlx5e_tls_get_stop_room(mdev, params);
+	stop_room = mlx5e_ktls_get_stop_room(mdev, params);
 	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
 	if (is_mpwqe)
 		/* A MPWQE can take up to the maximum-sized WQE + all the normal
@@ -327,9 +326,6 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
 		return false;
 
-	if (mlx5_fpga_is_ipsec_device(mdev))
-		return false;
-
 	if (params->xdp_prog) {
 		/* XSK params are not considered here. If striding RQ is in use,
 		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
@@ -423,9 +419,6 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 	int max_mtu;
 	int i;
 
-	if (mlx5_fpga_is_ipsec_device(mdev))
-		byte_count += MLX5E_METADATA_ETHER_LEN;
-
 	if (mlx5e_rx_is_linear_skb(params, xsk)) {
 		int frag_stride;
 
@@ -696,8 +689,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
 	bool allow_swp;
 
-	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
-		    !!MLX5_IPSEC_DEV(mdev);
+	allow_swp =
+		mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
 	mlx5e_build_sq_param_common(mdev, param);
 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
 	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -804,7 +797,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
 
 static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
 {
-	if (mlx5e_accel_is_ktls_rx(mdev))
+	if (mlx5e_is_ktls_rx(mdev))
 		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
 
 	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
@@ -833,7 +826,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
 
 	mlx5e_build_sq_param_common(mdev, param);
 	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
-	param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
+	param->is_tls = mlx5e_is_ktls_rx(mdev);
 	if (param->is_tls)
 		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
 	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));

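The stop-room hunk above keeps the existing accounting rule and only switches to the kTLS helper: reserve the TLS stop room plus room for one largest WQE, and reserve extra when TX MPWQE is enabled so an in-progress MPWQE session can still be closed. A minimal sketch of that rule, with hypothetical tls_room/max_wqe_bbs values standing in for what the driver derives from mdev and params:

	/* Illustrative sketch only, not the driver's exact arithmetic. */
	static u16 example_calc_stop_room(u16 tls_room, u16 max_wqe_bbs, bool is_mpwqe)
	{
		u16 stop_room = tls_room + max_wqe_bbs;

		if (is_mpwqe)
			stop_room += max_wqe_bbs; /* an open MPWQE may still need a full WQE */

		return stop_room;
	}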

@@ -37,8 +37,8 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en.h"
#include "en/txrx.h"
@@ -124,8 +125,9 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
#ifdef CONFIG_MLX5_EN_TLS
/* May send SKBs and WQEs. */
if (mlx5e_tls_skb_offloaded(skb))
if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
if (mlx5e_ktls_skb_offloaded(skb))
if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
					       &state->tls)))
return false;
#endif
@@ -174,7 +175,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC


@@ -226,8 +226,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
      MLX5_ACCEL_IPSEC_CAP_ESN)) {
    !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -275,8 +274,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.family == AF_INET6 &&
    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
      MLX5_ACCEL_IPSEC_CAP_IPV6)) {
    !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) {
netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
return -EINVAL;
}
@@ -286,9 +284,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (!mlx5_is_ipsec_device(priv->mdev))
return 0;
return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
				     sa_entry->ipsec_obj_id,
				     &sa_entry->ipsec_rule);
@@ -297,9 +292,6 @@ static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (!mlx5_is_ipsec_device(priv->mdev))
return;
mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
			      &sa_entry->ipsec_rule);
}
@@ -333,9 +325,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
/* create xfrm */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
sa_entry->xfrm =
	mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
				   MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
goto err_sa_entry;
@@ -414,7 +404,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec = NULL;
if (!MLX5_IPSEC_DEV(priv->mdev)) {
if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
return 0;
}
@@ -425,10 +415,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
ida_init(&ipsec->halloc);
ipsec->en_priv = priv;
ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
		       MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
				    priv->netdev->name);
if (!ipsec->wq) {
@@ -452,7 +439,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
mlx5e_accel_ipsec_fs_cleanup(priv);
destroy_workqueue(ipsec->wq);
ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
}
@@ -531,7 +517,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
    !MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
return;
@@ -550,15 +536,13 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
    !MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
}
if (mlx5_is_ipsec_device(mdev))
netdev->gso_partial_features |= NETIF_F_GSO_ESP;
mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
netdev->features |= NETIF_F_GSO_ESP;
netdev->hw_features |= NETIF_F_GSO_ESP;


@@ -40,7 +40,7 @@
#include <net/xfrm.h>
#include <linux/idr.h>
#include "accel/ipsec.h"
#include "ipsec_offload.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
@@ -55,24 +55,6 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
atomic64_t ipsec_tx_drop_trailer;
atomic64_t ipsec_tx_drop_metadata;
};
struct mlx5e_ipsec_stats {
u64 ipsec_dec_in_packets;
u64 ipsec_dec_out_packets;
u64 ipsec_dec_bypass_packets;
u64 ipsec_enc_in_packets;
u64 ipsec_enc_out_packets;
u64 ipsec_enc_bypass_packets;
u64 ipsec_dec_drop_packets;
u64 ipsec_dec_auth_fail_packets;
u64 ipsec_enc_drop_packets;
u64 ipsec_add_sa_success;
u64 ipsec_add_sa_fail;
u64 ipsec_del_sa_success;
u64 ipsec_del_sa_fail;
u64 ipsec_cmd_drop;
};
struct mlx5e_accel_fs_esp;
@@ -81,11 +63,8 @@ struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
bool no_trailer;
spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
struct ida halloc;
spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
struct mlx5e_ipsec_tx *tx_fs;
@@ -116,7 +95,6 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_rule ipsec_rule;
};
void mlx5e_ipsec_build_inverse_table(void);
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -125,11 +103,6 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
					      unsigned int handle);
#else
static inline void mlx5e_ipsec_build_inverse_table(void)
{
}
static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
return 0;


@@ -2,7 +2,7 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
#include "accel/ipsec_offload.h"
#include "ipsec_offload.h"
#include "ipsec_fs.h"
#include "fs_core.h"
@@ -700,9 +700,6 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
int err;
if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
return -EOPNOTSUPP;
err = fs_init_tx(priv);
if (err)
return err;


@@ -6,10 +6,9 @@
#include "en.h"
#include "ipsec.h"
#include "accel/ipsec_offload.h"
#include "ipsec_offload.h"
#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
@@ -19,8 +18,4 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5_accel_esp_xfrm_attrs *attrs,
				   struct mlx5e_ipsec_rule *ipsec_rule);
#else
static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
#endif
#endif /* __MLX5_IPSEC_STEERING_H__ */


@@ -1,14 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
#include "ipsec_offload.h"
#include "lib/mlx5.h"
#include "en_accel/ipsec_fs.h"
#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
				   MLX5_ACCEL_IPSEC_CAP_LSO)
struct mlx5_ipsec_sa_ctx {
struct rhash_head hash;
u32 enc_key_id;
@@ -25,25 +22,37 @@ struct mlx5_ipsec_esp_xfrm {
struct mlx5_accel_esp_xfrm accel_xfrm;
};
static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
u32 caps;
if (!mlx5_is_ipsec_device(mdev))
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return 0;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return 0;
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return 0;
if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) ||
!MLX5_CAP_ETH(mdev, insert_trailer))
return 0;
if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
return 0;
caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 |
MLX5_ACCEL_IPSEC_CAP_LSO;
if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
    MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
}
/* We can accommodate up to 2^24 different IPsec objects
* because we use up to 24 bit in flow table metadata
@@ -52,6 +61,7 @@ static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static int
mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
@@ -94,8 +104,7 @@ mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
static struct mlx5_accel_esp_xfrm *
mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
				   const struct mlx5_accel_esp_xfrm_attrs *attrs,
				   const struct mlx5_accel_esp_xfrm_attrs *attrs)
u32 flags)
{
struct mlx5_ipsec_esp_xfrm *mxfrm;
int err = 0;
@@ -274,11 +283,6 @@ static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
mutex_unlock(&mxfrm->lock);
}
static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
{
return 0;
}
static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
				 struct mlx5_ipsec_obj_attrs *attrs,
				 u32 ipsec_id)
@@ -366,20 +370,51 @@ change_sw_xfrm_attrs:
return err;
}
static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
	.device_caps = mlx5_ipsec_offload_device_caps,
	.create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
				       struct mlx5_accel_esp_xfrm *xfrm,
				       u32 *sa_handle)
.free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
.init = mlx5_ipsec_offload_init,
.esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
.esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
.esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
};
const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
{
if (!mlx5_ipsec_offload_device_caps(mdev))
return NULL;
return &ipsec_offload_ops;
__be32 saddr[4] = {}, daddr[4] = {};
if (!xfrm->attrs.is_ipv6) {
saddr[3] = xfrm->attrs.saddr.a4;
daddr[3] = xfrm->attrs.daddr.a4;
} else {
memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
}
return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr,
xfrm->attrs.spi,
xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
{
mlx5_ipsec_offload_delete_sa_ctx(context);
}
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_accel_esp_xfrm *xfrm;
xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs);
if (IS_ERR(xfrm))
return xfrm;
xfrm->mdev = mdev;
return xfrm;
}
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
mlx5_ipsec_offload_esp_destroy_xfrm(xfrm);
}
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs);
}

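With the rework above, mlx5_ipsec_device_caps() is both the gate and the capability report: it returns 0 when IPsec crypto offload is unusable and a MLX5_ACCEL_IPSEC_CAP_* bitmask otherwise, replacing the removed mlx5_is_ipsec_device()/MLX5_IPSEC_DEV() helpers. A hedged sketch of a caller-side check (the example_* name is illustrative, not part of the driver):

	static int example_check_ipsec_offload(struct mlx5_core_dev *mdev, bool need_esn)
	{
		u32 caps = mlx5_ipsec_device_caps(mdev);

		if (!(caps & MLX5_ACCEL_IPSEC_CAP_ESP))
			return -EOPNOTSUPP;	/* no ESP crypto offload at all */
		if (need_esn && !(caps & MLX5_ACCEL_IPSEC_CAP_ESN))
			return -EOPNOTSUPP;	/* ESN requested but not supported */
		return 0;
	}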

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5_IPSEC_OFFLOAD_H__
#define __MLX5_IPSEC_OFFLOAD_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/accel.h>
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle);
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
#endif /* __MLX5_IPSEC_OFFLOAD_H__ */


@@ -34,78 +34,16 @@
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include "accel/ipsec_offload.h"
#include "ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "accel/accel.h"
#include "en.h"
enum {
MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};
struct mlx5e_ipsec_rx_metadata {
unsigned char nexthdr;
__be32 sa_handle;
} __packed;
enum {
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};
struct mlx5e_ipsec_tx_metadata {
__be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */
__be16 seq; /* LSBs of the first TCP seq, only for LSO */
u8 esp_next_proto; /* Next protocol of ESP */
} __packed;
struct mlx5e_ipsec_metadata {
unsigned char syndrome;
union {
unsigned char raw[5];
/* from FPGA to host, on successful decrypt */
struct mlx5e_ipsec_rx_metadata rx;
/* from host to FPGA */
struct mlx5e_ipsec_tx_metadata tx;
} __packed content;
/* packet type ID field */
__be16 ethertype;
} __packed;
#define MAX_LSO_MSS 2048
/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
{
return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
}
static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
{
struct mlx5e_ipsec_metadata *mdata;
struct ethhdr *eth;
if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
return ERR_PTR(-ENOMEM);
eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
skb->mac_header -= sizeof(*mdata);
mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
memmove(skb->data, skb->data + sizeof(*mdata),
2 * ETH_ALEN);
eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
return mdata;
}
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
unsigned int alen = crypto_aead_authsize(x->data);
@@ -244,40 +182,6 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
skb_store_bits(skb, iv_offset, &seqno, 8);
}
static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
struct mlx5e_ipsec_metadata *mdata,
struct xfrm_offload *xo)
{
struct ip_esp_hdr *esph;
struct tcphdr *tcph;
if (skb_is_gso(skb)) {
/* Add LSO metadata indication */
esph = ip_esp_hdr(skb);
tcph = inner_tcp_hdr(skb);
netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
skb->network_header,
skb->transport_header,
skb->inner_network_header,
skb->inner_transport_header);
netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
skb->len, skb_shinfo(skb)->gso_size,
ntohs(tcph->source), ntohs(tcph->dest),
ntohl(tcph->seq), ntohl(esph->seq_no));
mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
} else {
mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
}
mdata->content.tx.esp_next_proto = xo->proto;
netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
mdata->syndrome, mdata->content.tx.esp_next_proto,
ntohs(mdata->content.tx.mss_inv),
ntohs(mdata->content.tx.seq));
}
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
			       struct mlx5_wqe_inline_seg *inlseg)
@@ -298,7 +202,6 @@ static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
ipsec_st->x = x;
ipsec_st->xo = xo;
if (mlx5_is_ipsec_device(priv->mdev)) {
aead = x->data;
alen = crypto_aead_authsize(aead);
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
@@ -307,7 +210,6 @@ static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
tailen = plen + alen;
ipsec_st->plen = plen;
ipsec_st->tailen = tailen;
}
return 0;
}
@@ -340,7 +242,6 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
((struct iphdr *)skb_network_header(skb))->protocol :
((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
if (mlx5_is_ipsec_device(priv->mdev)) {
eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
encap = x->encap;
@@ -353,7 +254,6 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
}
}
}
bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
@@ -363,7 +263,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec_metadata *mdata;
struct xfrm_state *x;
struct sec_path *sp;
@@ -392,19 +291,8 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
if (MLX5_CAP_GEN(priv->mdev, fpga)) {
mdata = mlx5e_ipsec_add_metadata(skb);
if (IS_ERR(mdata)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
goto drop;
}
}
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
if (MLX5_CAP_GEN(priv->mdev, fpga))
mlx5e_ipsec_set_metadata(skb, mdata, xo);
mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
return true;
@@ -414,79 +302,6 @@ drop:
return false;
}
static inline struct xfrm_state *
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
struct mlx5e_ipsec_metadata *mdata)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo;
struct xfrm_state *xs;
struct sec_path *sp;
u32 sa_handle;
sp = secpath_set(skb);
if (unlikely(!sp)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
return NULL;
}
sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
if (unlikely(!xs)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
return NULL;
}
sp = skb_sec_path(skb);
sp->xvec[sp->len++] = xs;
sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
switch (mdata->syndrome) {
case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (likely(priv->ipsec->no_trailer)) {
xo->flags |= XFRM_ESP_NO_TRAILER;
xo->proto = mdata->content.rx.nexthdr;
}
break;
case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
break;
case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
xo->status = CRYPTO_INVALID_PROTOCOL;
break;
default:
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
return NULL;
}
return xs;
}
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt)
{
struct mlx5e_ipsec_metadata *mdata;
struct xfrm_state *xs;
if (!is_metadata_hdr_valid(skb))
return skb;
/* Use the metadata */
mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
if (unlikely(!xs)) {
kfree_skb(skb);
return NULL;
}
remove_metadata_hdr(skb);
*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
return skb;
}
enum {
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
@@ -528,8 +343,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (WARN_ON_ONCE(priv->ipsec->no_trailer))
xo->flags |= XFRM_ESP_NO_TRAILER;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
@@ -541,21 +354,3 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
void mlx5e_ipsec_build_inverse_table(void)
{
u16 mss_inv;
u32 mss;
/* Calculate 1/x inverse table for use in GSO data path.
* Using this table, we provide the IPSec accelerator with the value of
* 1/gso_size so that it can infer the position of each segment inside
* the GSO, and increment the ESP sequence number, and generate the IV.
* The HW needs this value in Q0.16 fixed-point number format
*/
mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
for (mss = 2; mss < MAX_LSO_MSS; mss++) {
mss_inv = div_u64(1ULL << 32, mss) >> 16;
mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
}
}

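For reference, the Q0.16 encoding used by the deleted inverse table works out as follows: for an MSS of 1460 bytes, div_u64(1ULL << 32, 1460) >> 16 = 44 (0x002C), i.e. 44 / 65536 ≈ 1/1489, the truncated fixed-point approximation of 1/1460 that was handed to the accelerator for each GSO stream.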

@@ -53,9 +53,6 @@ struct mlx5e_accel_tx_ipsec_state {
#ifdef CONFIG_MLX5_EN_IPSEC
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
					  struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_inverse_table_init(void);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
			    struct xfrm_offload *xo);


@@ -35,27 +35,9 @@
#include <net/sock.h>
#include "en.h"
#include "accel/ipsec.h"
#include "ipsec_offload.h"
#include "fpga/sdk.h"
#include "en_accel/ipsec.h"
#include "fpga/ipsec.h"
static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) },
};
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
@@ -65,13 +47,11 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) },
};
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
	atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -103,45 +83,4 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
{
int ret = 0;
if (priv->ipsec)
ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
NUM_IPSEC_HW_COUNTERS);
if (ret)
memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
{
unsigned int i;
if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
mlx5e_ipsec_hw_stats_desc[i].format);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
{
int i;
if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
mlx5e_ipsec_hw_stats_desc,
i);
return idx;
}
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);

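The surviving software-stats path reads each counter through the byte offset recorded in its counter_desc entry, which is exactly what the MLX5E_READ_CTR_ATOMIC64 macro above does. A minimal sketch of that pattern with a hypothetical two-counter struct (the example_* names are illustrative):

	struct example_sw_stats {
		atomic64_t rx_drop;
		atomic64_t tx_drop;
	};

	static const struct counter_desc example_stats_desc[] = {
		{ MLX5E_DECLARE_STAT(struct example_sw_stats, rx_drop) },
		{ MLX5E_DECLARE_STAT(struct example_sw_stats, tx_drop) },
	};

	static u64 example_read_stat(struct example_sw_stats *s, int i)
	{
		/* base pointer plus the offset stored in the descriptor table */
		return MLX5E_READ_CTR_ATOMIC64(s, example_stats_desc, i);
	}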

@@ -2,11 +2,49 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "en.h"
#include "en_accel/tls.h"
#include "lib/mlx5.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id)
{
u32 sz_bytes;
void *key;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info =
(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
key = info->key;
sz_bytes = sizeof(info->key);
break;
}
case TLS_CIPHER_AES_GCM_256: {
struct tls12_crypto_info_aes_gcm_256 *info =
(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
key = info->key;
sz_bytes = sizeof(info->key);
break;
}
default:
return -EINVAL;
}
return mlx5_create_encryption_key(mdev, key, sz_bytes,
MLX5_ACCEL_OBJ_TLS_KEY,
p_key_id);
}
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
{
mlx5_destroy_encryption_key(mdev, key_id);
}
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
			  enum tls_offload_ctx_dir direction,
			  struct tls_crypto_info *crypto_info,
@@ -59,15 +97,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev))
return;
if (mlx5e_accel_is_ktls_tx(mdev)) {
if (mlx5e_is_ktls_tx(mdev)) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
if (mlx5e_accel_is_ktls_rx(mdev))
if (mlx5e_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
@@ -92,7 +130,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
int err;
if (!mlx5e_accel_is_ktls_rx(priv->mdev))
if (!mlx5e_is_ktls_rx(priv->mdev))
return 0;
priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
@@ -112,7 +150,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
if (!mlx5e_accel_is_ktls_rx(priv->mdev))
if (!mlx5e_is_ktls_rx(priv->mdev))
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
@@ -120,3 +158,24 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
destroy_workqueue(priv->tls->rx_wq);
}
int mlx5e_ktls_init(struct mlx5e_priv *priv)
{
struct mlx5e_tls *tls;
if (!mlx5e_is_ktls_device(priv->mdev))
return 0;
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
if (!tls)
return -ENOMEM;
priv->tls = tls;
return 0;
}
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
{
kfree(priv->tls);
priv->tls = NULL;
}

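mlx5_ktls_create_key(), added above, extracts the raw AES-GCM key bytes from the tls_crypto_info and loads them into a device encryption-key object, returning the key id that the offload context later references; mlx5_ktls_destroy_key() releases it. A hedged usage sketch (example_* is illustrative; crypto_info would come from the tls_dev_add() callback):

	static int example_load_key(struct mlx5_core_dev *mdev,
				    struct tls_crypto_info *crypto_info)
	{
		u32 key_id;
		int err;

		err = mlx5_ktls_create_key(mdev, crypto_info, &key_id);
		if (err)
			return err;	/* e.g. -EINVAL for an unsupported cipher */

		/* ... program key_id into the TX/RX offload context ... */

		mlx5_ktls_destroy_key(mdev, key_id);
		return 0;
	}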

@@ -4,9 +4,42 @@
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__
#include <linux/tls.h>
#include <net/tls.h>
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id);
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
{
if (is_kdump_kernel())
return false;
if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
}
static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info)
{
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
break;
}
return false;
}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
@@ -16,26 +49,36 @@ struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
{
return !is_kdump_kernel() &&
	mlx5_accel_is_ktls_tx(mdev);
return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
}
static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
return !is_kdump_kernel() &&
	mlx5_accel_is_ktls_rx(mdev);
return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
}
static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev)
{
return !is_kdump_kernel() &&
	mlx5_accel_is_ktls_device(mdev);
}
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
atomic64_t tx_tls_del;
atomic64_t rx_tls_ctx;
atomic64_t rx_tls_del;
};
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
};
int mlx5e_ktls_init(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
@@ -64,10 +107,23 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; }
static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; }
static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
return false;
}
static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
return 0;
}
static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
return 0;
}
#endif
#endif /* __MLX5E_TLS_H__ */

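The new mlx5e_is_ktls_device()/mlx5e_ktls_type_check() helpers above gate kTLS purely on HCA capabilities (tls_tx/tls_rx, log_max_dek, TLS 1.2 AES-GCM-128) now that the FPGA TLS path is gone. A small sketch of how an offload request might be vetted with them (example_* is illustrative):

	static bool example_can_offload(struct mlx5_core_dev *mdev,
					struct tls_crypto_info *crypto_info)
	{
		return mlx5e_is_ktls_device(mdev) &&
		       mlx5e_ktls_type_check(mdev, crypto_info);
	}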

@@ -3,7 +3,7 @@
#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"


@@ -36,14 +36,7 @@
#include "en.h"
#include "fpga/sdk.h"
#include "en_accel/tls.h"
#include "en_accel/ktls.h"
static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
};
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
@@ -55,51 +48,43 @@ static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
	atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return NULL;
if (mlx5e_accel_is_ktls_device(priv->mdev))
return mlx5e_ktls_sw_stats_desc;
return mlx5e_tls_sw_stats_desc;
}
int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return 0;
if (mlx5e_accel_is_ktls_device(priv->mdev))
return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
}
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
stats_desc = get_tls_atomic_stats(priv);
n = mlx5e_tls_get_count(priv);
if (!priv->tls)
return 0;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
	stats_desc[i].format);
	mlx5e_ktls_sw_stats_desc[i].format);
return n;
}
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
stats_desc = get_tls_atomic_stats(priv);
n = mlx5e_tls_get_count(priv);
if (!priv->tls)
return 0;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
data[idx++] =
	MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
				stats_desc, i);
data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
				       mlx5e_ktls_sw_stats_desc,
				       i);
return n;
}


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
#include "en_accel/tls.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -27,7 +27,7 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
{
u16 num_dumps, stop_room = 0;
if (!mlx5e_accel_is_ktls_tx(mdev))
if (!mlx5e_is_ktls_tx(mdev))
return 0;
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
@@ -448,14 +448,26 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb, int datalen,
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
struct tls_context *tls_ctx;
int datalen;
u32 seq;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
return true;
mlx5e_tx_mpwqe_ensure_complete(sq);
tls_ctx = tls_get_ctx(skb->sk);
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {


@@ -16,8 +16,8 @@ struct mlx5e_accel_tx_tls_state {
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb, int datalen,
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
@@ -48,6 +48,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
}
static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
{
return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
}
static inline void
mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
struct mlx5e_accel_tx_tls_state *state)
{
cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
}
#else
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
@@ -69,6 +81,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
return false;
}
static inline u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
return 0;
}
static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe,
u32 *cqe_bcnt)
{
}
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_TXRX_H__ */

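On TX, mlx5e_ktls_skb_offloaded() is the cheap per-skb test and mlx5e_ktls_handle_tx_wqe() later stamps the TIS number into the WQE control segment; between the two, mlx5e_ktls_handle_tx_skb() does the real work with the reworked (netdev, sq, skb, state) signature. A sketch of that ordering as a caller might use it (example_* is illustrative):

	static bool example_tx_begin(struct net_device *dev, struct mlx5e_txqsq *sq,
				     struct sk_buff *skb,
				     struct mlx5e_accel_tx_tls_state *state)
	{
		if (!mlx5e_ktls_skb_offloaded(skb))
			return true;	/* not a device-offloaded TLS socket */

		return mlx5e_ktls_handle_tx_skb(dev, sq, skb, state);
	}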

@@ -6,7 +6,6 @@
#include <net/tls.h>
#include "en.h"
#include "accel/tls.h"
enum {
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,


@@ -1,247 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/netdevice.h>
#include <net/ipv6.h>
#include "en_accel/tls.h"
#include "accel/tls.h"
static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
MLX5_SET(tls_flow, flow, ipv6, 0);
memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
}
#if IS_ENABLED(CONFIG_IPV6)
static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
MLX5_SET(tls_flow, flow, ipv6, 1);
memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
&np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
}
#endif
static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
MLX5_FLD_SZ_BYTES(tls_flow, src_port));
memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
}
static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
{
switch (sk->sk_family) {
case AF_INET:
mlx5e_tls_set_ipv4_flow(flow, sk);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (!sk->sk_ipv6only &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
mlx5e_tls_set_ipv4_flow(flow, sk);
break;
}
if (!(caps & MLX5_ACCEL_TLS_IPV6))
goto error_out;
mlx5e_tls_set_ipv6_flow(flow, sk);
break;
#endif
default:
goto error_out;
}
mlx5e_tls_set_flow_tcp_ports(flow, sk);
return 0;
error_out:
return -EINVAL;
}
static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct mlx5_core_dev *mdev = priv->mdev;
u32 caps = mlx5_accel_tls_device_caps(mdev);
int ret = -ENOMEM;
void *flow;
u32 swid;
flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
if (!flow)
return ret;
ret = mlx5e_tls_set_flow(flow, sk, caps);
if (ret)
goto free_flow;
ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
start_offload_tcp_sn, &swid,
direction == TLS_OFFLOAD_CTX_DIR_TX);
if (ret < 0)
goto free_flow;
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
struct mlx5e_tls_offload_context_tx *tx_ctx =
mlx5e_get_tls_tx_context(tls_ctx);
tx_ctx->swid = htonl(swid);
tx_ctx->expected_seq = start_offload_tcp_sn;
} else {
struct mlx5e_tls_offload_context_rx *rx_ctx =
mlx5e_get_tls_rx_context(tls_ctx);
rx_ctx->handle = htonl(swid);
}
return 0;
free_flow:
kfree(flow);
return ret;
}
static void mlx5e_tls_del(struct net_device *netdev,
struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
unsigned int handle;
handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
mlx5e_get_tls_tx_context(tls_ctx)->swid :
mlx5e_get_tls_rx_context(tls_ctx)->handle);
mlx5_accel_tls_del_flow(priv->mdev, handle,
direction == TLS_OFFLOAD_CTX_DIR_TX);
}
static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
u32 seq, u8 *rcd_sn_data,
enum tls_offload_ctx_dir direction)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_rx *rx_ctx;
__be64 rcd_sn = *(__be64 *)rcd_sn_data;
if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
return -EINVAL;
rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
be64_to_cpu(rcd_sn));
mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
return 0;
}
static const struct tlsdev_ops mlx5e_tls_ops = {
.tls_dev_add = mlx5e_tls_add,
.tls_dev_del = mlx5e_tls_del,
.tls_dev_resync = mlx5e_tls_resync,
};
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
u32 caps;
if (mlx5e_accel_is_ktls_device(priv->mdev)) {
mlx5e_ktls_build_netdev(priv);
return;
}
/* FPGA */
if (!mlx5e_accel_is_tls_device(priv->mdev))
return;
caps = mlx5_accel_tls_device_caps(priv->mdev);
if (caps & MLX5_ACCEL_TLS_TX) {
netdev->features |= NETIF_F_HW_TLS_TX;
netdev->hw_features |= NETIF_F_HW_TLS_TX;
}
if (caps & MLX5_ACCEL_TLS_RX) {
netdev->features |= NETIF_F_HW_TLS_RX;
netdev->hw_features |= NETIF_F_HW_TLS_RX;
}
if (!(caps & MLX5_ACCEL_TLS_LRO)) {
netdev->features &= ~NETIF_F_LRO;
netdev->hw_features &= ~NETIF_F_LRO;
}
netdev->tlsdev_ops = &mlx5e_tls_ops;
}
int mlx5e_tls_init(struct mlx5e_priv *priv)
{
struct mlx5e_tls *tls;
if (!mlx5e_accel_is_tls_device(priv->mdev))
return 0;
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
if (!tls)
return -ENOMEM;
priv->tls = tls;
return 0;
}
void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tls *tls = priv->tls;
if (!tls)
return;
kfree(tls);
priv->tls = NULL;
}


@@ -1,132 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5E_TLS_H__
#define __MLX5E_TLS_H__
#include "accel/tls.h"
#include "en_accel/ktls.h"
#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "en.h"
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
atomic64_t tx_tls_del;
atomic64_t tx_tls_drop_metadata;
atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data;
atomic64_t tx_tls_drop_bypass_required;
atomic64_t rx_tls_ctx;
atomic64_t rx_tls_del;
atomic64_t rx_tls_drop_resync_request;
atomic64_t rx_tls_resync_request;
atomic64_t rx_tls_resync_reply;
atomic64_t rx_tls_auth_fail;
};
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
};
struct mlx5e_tls_offload_context_tx {
struct tls_offload_context_tx base;
u32 expected_seq;
__be32 swid;
};
static inline struct mlx5e_tls_offload_context_tx *
mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
{
BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
TLS_OFFLOAD_CONTEXT_SIZE_TX);
return container_of(tls_offload_ctx_tx(tls_ctx),
struct mlx5e_tls_offload_context_tx,
base);
}
struct mlx5e_tls_offload_context_rx {
struct tls_offload_context_rx base;
__be32 handle;
};
static inline struct mlx5e_tls_offload_context_rx *
mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
{
BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
TLS_OFFLOAD_CONTEXT_SIZE_RX);
return container_of(tls_offload_ctx_rx(tls_ctx),
struct mlx5e_tls_offload_context_rx,
base);
}
static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv)
{
return priv->tls;
}
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_tls_init(struct mlx5e_priv *priv);
void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
int mlx5e_tls_get_count(struct mlx5e_priv *priv);
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return !is_kdump_kernel() &&
mlx5_accel_is_tls_device(mdev);
}
#else
static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
if (!is_kdump_kernel() &&
mlx5_accel_is_ktls_device(priv->mdev))
mlx5e_ktls_build_netdev(priv);
}
static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; }
static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
#endif
#endif /* __MLX5E_TLS_H__ */


@ -1,390 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
#include "accel/accel.h"
#include <net/inet6_hashtables.h>
#include <linux/ipv6.h>
#define SYNDROM_DECRYPTED 0x30
#define SYNDROM_RESYNC_REQUEST 0x31
#define SYNDROM_AUTH_FAILED 0x32
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33
struct sync_info {
u64 rcd_sn;
s32 sync_len;
int nr_frags;
skb_frag_t frags[MAX_SKB_FRAGS];
};
struct recv_metadata_content {
u8 syndrome;
u8 reserved;
__be32 sync_seq;
} __packed;
struct send_metadata_content {
/* One byte of syndrome followed by 3 bytes of swid */
__be32 syndrome_swid;
__be16 first_seq;
} __packed;
struct mlx5e_tls_metadata {
union {
/* from fpga to host */
struct recv_metadata_content recv;
/* from host to fpga */
struct send_metadata_content send;
unsigned char raw[6];
} __packed content;
/* packet type ID field */
__be16 ethertype;
} __packed;
static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
{
struct mlx5e_tls_metadata *pet;
struct ethhdr *eth;
if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
return -ENOMEM;
eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
pet = (struct mlx5e_tls_metadata *)(eth + 1);
memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
2 * ETH_ALEN);
eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
pet->content.send.syndrome_swid =
htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
return 0;
}
static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
u32 tcp_seq, struct sync_info *info)
{
int remaining, i = 0, ret = -EINVAL;
struct tls_record_info *record;
unsigned long flags;
s32 sync_size;
spin_lock_irqsave(&context->base.lock, flags);
record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
if (unlikely(!record))
goto out;
sync_size = tcp_seq - tls_record_start_seq(record);
info->sync_len = sync_size;
if (unlikely(sync_size < 0)) {
if (tls_record_is_start_marker(record))
goto done;
goto out;
}
remaining = sync_size;
while (remaining > 0) {
info->frags[i] = record->frags[i];
__skb_frag_ref(&info->frags[i]);
remaining -= skb_frag_size(&info->frags[i]);
if (remaining < 0)
skb_frag_size_add(&info->frags[i], remaining);
i++;
}
info->nr_frags = i;
done:
ret = 0;
out:
spin_unlock_irqrestore(&context->base.lock, flags);
return ret;
}
static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
struct sk_buff *nskb, u32 tcp_seq,
int headln, __be64 rcd_sn)
{
struct mlx5e_tls_metadata *pet;
u8 syndrome = SYNDROME_SYNC;
struct iphdr *iph;
struct tcphdr *th;
int data_len, mss;
nskb->dev = skb->dev;
skb_reset_mac_header(nskb);
skb_set_network_header(nskb, skb_network_offset(skb));
skb_set_transport_header(nskb, skb_transport_offset(skb));
memcpy(nskb->data, skb->data, headln);
memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
iph = ip_hdr(nskb);
iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
th = tcp_hdr(nskb);
data_len = nskb->len - headln;
tcp_seq -= data_len;
th->seq = htonl(tcp_seq);
mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
skb_shinfo(nskb)->gso_size = 0;
if (data_len > mss) {
skb_shinfo(nskb)->gso_size = mss;
skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
}
skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
memcpy(pet, &syndrome, sizeof(syndrome));
pet->content.send.first_seq = htons(tcp_seq);
/* MLX5 devices don't care about the checksum partial start, offset
* and pseudo header
*/
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->queue_mapping = skb->queue_mapping;
}
static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
int i;
sq->stats->tls_ooo++;
if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
/* We might get here if a retransmission reaches the driver
* after the relevant record is acked.
* It should be safe to drop the packet in this case
*/
atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
goto err_out;
}
if (unlikely(info.sync_len < 0)) {
u32 payload;
headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
payload = skb->len - headln;
if (likely(payload <= -info.sync_len))
/* SKB payload doesn't require offload
*/
return true;
atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
goto err_out;
}
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
goto err_out;
}
headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
linear_len += headln + sizeof(info.rcd_sn);
nskb = alloc_skb(linear_len, GFP_ATOMIC);
if (unlikely(!nskb)) {
atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
goto err_out;
}
context->expected_seq = tcp_seq + skb->len - headln;
skb_put(nskb, linear_len);
for (i = 0; i < info.nr_frags; i++)
skb_shinfo(nskb)->frags[i] = info.frags[i];
skb_shinfo(nskb)->nr_frags = info.nr_frags;
nskb->data_len = info.sync_len;
nskb->len += info.sync_len;
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
mlx5e_sq_xmit_simple(sq, nskb, true);
return true;
err_out:
dev_kfree_skb_any(skb);
return false;
}
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_tx *context;
struct tls_context *tls_ctx;
u32 expected_seq;
int datalen;
u32 skb_seq;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
return true;
mlx5e_tx_mpwqe_ensure_complete(sq);
tls_ctx = tls_get_ctx(skb->sk);
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
if (mlx5e_accel_is_ktls_tx(sq->mdev))
return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
/* FPGA */
skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx);
expected_seq = context->expected_seq;
if (unlikely(expected_seq != skb_seq))
return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
dev_kfree_skb_any(skb);
return false;
}
context->expected_seq = skb_seq + datalen;
return true;
err_out:
dev_kfree_skb_any(skb);
return false;
}
static int tls_update_resync_sn(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5e_tls_metadata *mdata)
{
struct sock *sk = NULL;
struct iphdr *iph;
struct tcphdr *th;
__be32 seq;
if (mdata->ethertype != htons(ETH_P_IP))
return -EINVAL;
iph = (struct iphdr *)(mdata + 1);
th = ((void *)iph) + iph->ihl * 4;
if (iph->version == 4) {
sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
#endif
}
if (!sk || sk->sk_state == TCP_TIME_WAIT) {
struct mlx5e_priv *priv = netdev_priv(netdev);
atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
goto out;
}
skb->sk = sk;
skb->destructor = sock_edemux;
memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
tls_offload_rx_resync_request(sk, seq);
out:
return 0;
}
/* FPGA tls rx handler */
void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 *cqe_bcnt)
{
struct mlx5e_tls_metadata *mdata;
struct mlx5e_priv *priv;
/* Use the metadata */
mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
switch (mdata->content.recv.syndrome) {
case SYNDROM_DECRYPTED:
skb->decrypted = 1;
break;
case SYNDROM_RESYNC_REQUEST:
tls_update_resync_sn(rq->netdev, skb, mdata);
priv = netdev_priv(rq->netdev);
atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
break;
case SYNDROM_AUTH_FAILED:
/* Authentication failure will be observed and verified by kTLS */
priv = netdev_priv(rq->netdev);
atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
break;
default:
/* Bypass the metadata header to others */
return;
}
remove_metadata_hdr(skb);
*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
}
u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
if (!mlx5e_accel_is_tls_device(mdev))
return 0;
if (mlx5e_accel_is_ktls_device(mdev))
return mlx5e_ktls_get_stop_room(mdev, params);
/* FPGA */
/* Resync SKB. */
return mlx5e_stop_room_for_max_wqe(mdev);
}


@ -1,91 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5E_TLS_RXTX_H__
#define __MLX5E_TLS_RXTX_H__
#include "accel/accel.h"
#include "en_accel/ktls_txrx.h"
#ifdef CONFIG_MLX5_EN_TLS
#include <linux/skbuff.h>
#include "en.h"
#include "en/txrx.h"
u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb)
{
return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
}
static inline void
mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
struct mlx5e_accel_tx_tls_state *state)
{
cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
}
void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 *cqe_bcnt);
static inline void
mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */
return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);
if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb)))
return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
}
#else
static inline bool
mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; }
static inline void
mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
return 0;
}
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_RXTX_H__ */


@ -47,9 +47,8 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
-#include "accel/ipsec.h"
+#include "en_accel/ipsec_offload.h"
-#include "accel/tls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
@ -68,7 +67,6 @@
#include "en/ptp.h"
#include "qos.h"
#include "en/trap.h"
-#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@ -1036,9 +1034,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
goto err_destroy_rq;
-if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
-__set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
@ -1334,7 +1329,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
-if (MLX5_IPSEC_DEV(c->priv->mdev))
+if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
@ -4471,12 +4466,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
-if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
-netdev_warn(netdev,
-"XDP is not available on Innova cards with IPsec support\n");
-return -EINVAL;
-}
new_params = priv->channels.params;
new_params.xdp_prog = prog;
@ -4934,7 +4923,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_ipsec_build_netdev(priv);
-mlx5e_tls_build_netdev(priv);
+mlx5e_ktls_build_netdev(priv);
}
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@ -4996,7 +4985,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
-err = mlx5e_tls_init(priv);
+err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@ -5007,7 +4996,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
-mlx5e_tls_cleanup(priv);
+mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv);
}
@ -5704,7 +5693,6 @@ int mlx5e_init(void)
{
int ret;
-mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)


@ -1112,7 +1112,6 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
-&MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(ptp),
};


@ -48,10 +48,9 @@
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
-#include "accel/ipsec.h"
+#include "en_accel/ipsec_offload.h"
-#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
@ -1416,7 +1415,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN;
-mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+if (unlikely(get_cqe_tls_offload(cqe)))
+mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
@ -2383,46 +2383,6 @@ const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
};
#endif /* CONFIG_MLX5_CORE_IPOIB */
-#ifdef CONFIG_MLX5_EN_IPSEC
-static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
-struct mlx5_wq_cyc *wq = &rq->wqe.wq;
-struct mlx5e_wqe_frag_info *wi;
-struct sk_buff *skb;
-u32 cqe_bcnt;
-u16 ci;
-ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
-wi = get_frag(rq, ci);
-cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
-if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
-rq->stats->wqe_err++;
-goto wq_free_wqe;
-}
-skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
-mlx5e_skb_from_cqe_linear,
-mlx5e_skb_from_cqe_nonlinear,
-rq, cqe, wi, cqe_bcnt);
-if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
-goto wq_free_wqe;
-skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
-if (unlikely(!skb))
-goto wq_free_wqe;
-mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
-napi_gro_receive(rq->cq.napi, skb);
-wq_free_wqe:
-mlx5e_free_rx_wqe(rq, wi, true);
-mlx5_wq_cyc_pop(wq);
-}
-#endif /* CONFIG_MLX5_EN_IPSEC */
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
struct net_device *netdev = rq->netdev;
@ -2439,10 +2399,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
-if (mlx5_fpga_is_ipsec_device(mdev)) {
-netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
-return -EINVAL;
-}
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
if (!rq->handle_rx_cqe) {
@ -2466,13 +2422,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
-if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
-priv->ipsec)
-rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
-else
-#endif
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of RQ is not set\n");


@ -32,7 +32,7 @@
#include "lib/mlx5.h"
#include "en.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"
@ -1900,17 +1900,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
-return mlx5e_tls_get_count(priv);
+return mlx5e_ktls_get_count(priv);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
-return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
-return idx + mlx5e_tls_get_stats(priv, data + idx);
+return idx + mlx5e_ktls_get_stats(priv, data + idx);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
@ -2443,7 +2443,6 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
-&MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),


@ -482,7 +482,6 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
-extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);


@ -57,9 +57,6 @@ struct mlx5_fpga_device {
u32 mkey;
struct mlx5_uars_page *uar;
} conn_res;
-struct mlx5_fpga_ipsec *ipsec;
-struct mlx5_fpga_tls *tls;
};
#define mlx5_fpga_dbg(__adev, format, ...) \

File diff suppressed because it is too large


@ -1,62 +0,0 @@
/*
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_FPGA_IPSEC_H__
#define __MLX5_FPGA_IPSEC_H__
#include "accel/ipsec.h"
#include "fs_cmd.h"
#ifdef CONFIG_MLX5_FPGA_IPSEC
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
void mlx5_fpga_ipsec_build_fs_cmds(void);
bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
#else
static inline
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
{ return NULL; }
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
return mlx5_fs_cmd_get_default(type);
}
static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
#endif /* CONFIG_MLX5_FPGA_IPSEC */
#endif /* __MLX5_FPGA_IPSEC_H__ */


@ -1,622 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx5/device.h>
#include "fpga/tls.h"
#include "fpga/cmd.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
#include "accel/tls.h"
struct mlx5_fpga_tls_command_context;
typedef void (*mlx5_fpga_tls_command_complete)
(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
struct mlx5_fpga_tls_command_context *ctx,
struct mlx5_fpga_dma_buf *resp);
struct mlx5_fpga_tls_command_context {
struct list_head list;
/* There is no guarantee on the order between the TX completion
* and the command response.
* The TX completion is going to touch cmd->buf even in
* the case of successful transmission.
* So instead of requiring separate allocations for cmd
* and cmd->buf we've decided to use a reference counter
*/
refcount_t ref;
struct mlx5_fpga_dma_buf buf;
mlx5_fpga_tls_command_complete complete;
};
static void
mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
{
if (refcount_dec_and_test(&ctx->ref))
kfree(ctx);
}
static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
struct mlx5_fpga_dma_buf *resp)
{
struct mlx5_fpga_conn *conn = fdev->tls->conn;
struct mlx5_fpga_tls_command_context *ctx;
struct mlx5_fpga_tls *tls = fdev->tls;
unsigned long flags;
spin_lock_irqsave(&tls->pending_cmds_lock, flags);
ctx = list_first_entry(&tls->pending_cmds,
struct mlx5_fpga_tls_command_context, list);
list_del(&ctx->list);
spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
ctx->complete(conn, fdev, ctx, resp);
}
static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_dma_buf *buf,
u8 status)
{
struct mlx5_fpga_tls_command_context *ctx =
container_of(buf, struct mlx5_fpga_tls_command_context, buf);
mlx5_fpga_tls_put_command_ctx(ctx);
if (unlikely(status))
mlx5_fpga_tls_cmd_complete(fdev, NULL);
}
static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
struct mlx5_fpga_tls_command_context *cmd,
mlx5_fpga_tls_command_complete complete)
{
struct mlx5_fpga_tls *tls = fdev->tls;
unsigned long flags;
int ret;
refcount_set(&cmd->ref, 2);
cmd->complete = complete;
cmd->buf.complete = mlx5_fpga_cmd_send_complete;
spin_lock_irqsave(&tls->pending_cmds_lock, flags);
/* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
* to make sure commands are inserted to the tls->pending_cmds list
* and the command QP in the same order.
*/
ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
if (likely(!ret))
list_add_tail(&cmd->list, &tls->pending_cmds);
else
complete(tls->conn, fdev, cmd, NULL);
spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
}
/* Start of context identifiers range (inclusive) */
#define SWID_START 0
/* End of context identifiers range (exclusive) */
#define SWID_END BIT(24)
static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
void *ptr)
{
unsigned long flags;
int ret;
/* TLS metadata format is 1 byte for syndrome followed
* by 3 bytes of swid (software ID)
* swid must not exceed 3 bytes.
* See tls_rxtx.c:insert_pet() for details
*/
BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
idr_preload(GFP_KERNEL);
spin_lock_irqsave(idr_spinlock, flags);
ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
spin_unlock_irqrestore(idr_spinlock, flags);
idr_preload_end();
return ret;
}
static void *mlx5_fpga_tls_release_swid(struct idr *idr,
spinlock_t *idr_spinlock, u32 swid)
{
unsigned long flags;
void *ptr;
spin_lock_irqsave(idr_spinlock, flags);
ptr = idr_remove(idr, swid);
spin_unlock_irqrestore(idr_spinlock, flags);
return ptr;
}
static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_dma_buf *buf, u8 status)
{
kfree(buf);
}
static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_tls_command_context *cmd,
struct mlx5_fpga_dma_buf *resp)
{
if (resp) {
u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
if (syndrome)
mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d",
syndrome);
}
mlx5_fpga_tls_put_command_ctx(cmd);
}
static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
{
memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
MLX5_BYTE_OFF(tls_flow, ipv6));
MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
MLX5_SET(tls_cmd, cmd, direction_sx,
MLX5_GET(tls_flow, flow, direction_sx));
}
int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
u32 seq, __be64 rcd_sn)
{
struct mlx5_fpga_dma_buf *buf;
int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
void *flow;
void *cmd;
int ret;
buf = kzalloc(size, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
cmd = (buf + 1);
rcu_read_lock();
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
if (unlikely(!flow)) {
rcu_read_unlock();
WARN_ONCE(1, "Received NULL pointer for handle\n");
kfree(buf);
return -EINVAL;
}
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
rcu_read_unlock();
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
buf->sg[0].data = cmd;
buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
buf->complete = mlx_tls_kfree_complete;
ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
if (ret < 0)
kfree(buf);
return ret;
}
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags)
{
struct mlx5_fpga_tls_command_context *ctx;
struct mlx5_fpga_dma_buf *buf;
void *cmd;
ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
if (!ctx)
return;
buf = &ctx->buf;
cmd = (ctx + 1);
MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
MLX5_SET(tls_cmd, cmd, swid, swid);
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
kfree(flow);
buf->sg[0].data = cmd;
buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
mlx5_fpga_tls_teardown_completion);
}
void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
gfp_t flags, bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow;
if (direction_sx)
flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
&tls->tx_idr_spinlock,
swid);
else
flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
&tls->rx_idr_spinlock,
swid);
if (!flow) {
mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
swid);
return;
}
synchronize_rcu(); /* before kfree(flow) */
mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}
enum mlx5_fpga_setup_stream_status {
MLX5_FPGA_CMD_PENDING,
MLX5_FPGA_CMD_SEND_FAILED,
MLX5_FPGA_CMD_RESPONSE_RECEIVED,
MLX5_FPGA_CMD_ABANDONED,
};
struct mlx5_setup_stream_context {
struct mlx5_fpga_tls_command_context cmd;
atomic_t status;
u32 syndrome;
struct completion comp;
};
static void
mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_tls_command_context *cmd,
struct mlx5_fpga_dma_buf *resp)
{
struct mlx5_setup_stream_context *ctx =
container_of(cmd, struct mlx5_setup_stream_context, cmd);
int status = MLX5_FPGA_CMD_SEND_FAILED;
void *tls_cmd = ctx + 1;
/* If we failed to send to command resp == NULL */
if (resp) {
ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
}
status = atomic_xchg_release(&ctx->status, status);
if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
complete(&ctx->comp);
return;
}
mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
ctx->syndrome);
if (!ctx->syndrome) {
/* The process was killed while waiting for the context to be
* added, and the add completed successfully.
* We need to destroy the HW context, and we can't reuse
* the command context because we might not have received
* the tx completion yet.
*/
mlx5_fpga_tls_del_flow(fdev->mdev,
MLX5_GET(tls_cmd, tls_cmd, swid),
GFP_ATOMIC,
MLX5_GET(tls_cmd, tls_cmd,
direction_sx));
}
mlx5_fpga_tls_put_command_ctx(cmd);
}
static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
struct mlx5_setup_stream_context *ctx)
{
struct mlx5_fpga_dma_buf *buf;
void *cmd = ctx + 1;
int status, ret = 0;
buf = &ctx->cmd.buf;
buf->sg[0].data = cmd;
buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);
init_completion(&ctx->comp);
atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
ctx->syndrome = -1;
mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
mlx5_fpga_tls_setup_completion);
wait_for_completion_killable(&ctx->comp);
status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
if (unlikely(status == MLX5_FPGA_CMD_PENDING))
/* ctx is going to be released in mlx5_fpga_tls_setup_completion */
return -EINTR;
if (unlikely(ctx->syndrome))
ret = -ENOMEM;
mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
return ret;
}
static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
struct mlx5_fpga_dma_buf *buf)
{
struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;
mlx5_fpga_tls_cmd_complete(fdev, buf);
}
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
{
if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
return false;
if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
return false;
if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
return false;
if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
return false;
return true;
}
static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
u32 *p_caps)
{
int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
u32 caps = 0;
void *buf;
buf = kzalloc(cap_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
if (err)
goto out;
if (MLX5_GET(tls_extended_cap, buf, tx))
caps |= MLX5_ACCEL_TLS_TX;
if (MLX5_GET(tls_extended_cap, buf, rx))
caps |= MLX5_ACCEL_TLS_RX;
if (MLX5_GET(tls_extended_cap, buf, tls_v12))
caps |= MLX5_ACCEL_TLS_V12;
if (MLX5_GET(tls_extended_cap, buf, tls_v13))
caps |= MLX5_ACCEL_TLS_V13;
if (MLX5_GET(tls_extended_cap, buf, lro))
caps |= MLX5_ACCEL_TLS_LRO;
if (MLX5_GET(tls_extended_cap, buf, ipv6))
caps |= MLX5_ACCEL_TLS_IPV6;
if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
caps |= MLX5_ACCEL_TLS_AES_GCM128;
if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
caps |= MLX5_ACCEL_TLS_AES_GCM256;
*p_caps = caps;
err = 0;
out:
kfree(buf);
return err;
}
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
struct mlx5_fpga_conn_attr init_attr = {0};
struct mlx5_fpga_conn *conn;
struct mlx5_fpga_tls *tls;
int err = 0;
if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
return 0;
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
if (!tls)
return -ENOMEM;
err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
if (err)
goto error;
if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
err = -ENOTSUPP;
goto error;
}
init_attr.rx_size = SBU_QP_QUEUE_SIZE;
init_attr.tx_size = SBU_QP_QUEUE_SIZE;
init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
init_attr.cb_arg = fdev;
conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
if (IS_ERR(conn)) {
err = PTR_ERR(conn);
mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
err);
goto error;
}
tls->conn = conn;
spin_lock_init(&tls->pending_cmds_lock);
INIT_LIST_HEAD(&tls->pending_cmds);
idr_init(&tls->tx_idr);
idr_init(&tls->rx_idr);
spin_lock_init(&tls->tx_idr_spinlock);
spin_lock_init(&tls->rx_idr_spinlock);
fdev->tls = tls;
return 0;
error:
kfree(tls);
return err;
}
void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
if (!fdev || !fdev->tls)
return;
mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
kfree(fdev->tls);
fdev->tls = NULL;
}
static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
struct tls_crypto_info *info,
__be64 *rcd_sn)
{
struct tls12_crypto_info_aes_gcm_128 *crypto_info =
(struct tls12_crypto_info_aes_gcm_128 *)info;
memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
/* in AES-GCM 128 we need to write the key twice */
memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
TLS_CIPHER_AES_GCM_128_KEY_SIZE,
crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
}
static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
struct tls_crypto_info *crypto_info)
{
__be64 rcd_sn;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
return -EINVAL;
mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
break;
default:
return -EINVAL;
}
return 0;
}
static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 swid, u32 tcp_sn)
{
u32 caps = mlx5_fpga_tls_device_caps(mdev);
struct mlx5_setup_stream_context *ctx;
int ret = -ENOMEM;
size_t cmd_size;
void *cmd;
cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
ctx = kzalloc(cmd_size, GFP_KERNEL);
if (!ctx)
goto out;
cmd = ctx + 1;
ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
if (ret)
goto free_ctx;
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
MLX5_SET(tls_cmd, cmd, swid, swid);
MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);
return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);
free_ctx:
kfree(ctx);
out:
return ret;
}
int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
int ret = -ENOMEM;
u32 swid;
if (direction_sx)
ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
&tls->tx_idr_spinlock, flow);
else
ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
&tls->rx_idr_spinlock, flow);
if (ret < 0)
return ret;
swid = ret;
MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
start_offload_tcp_sn);
if (ret && ret != -EINTR)
goto free_swid;
*p_swid = swid;
return 0;
free_swid:
if (direction_sx)
mlx5_fpga_tls_release_swid(&tls->tx_idr,
&tls->tx_idr_spinlock, swid);
else
mlx5_fpga_tls_release_swid(&tls->rx_idr,
&tls->rx_idr_spinlock, swid);
return ret;
}


@ -1,74 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_FPGA_TLS_H__
#define __MLX5_FPGA_TLS_H__
#include <linux/mlx5/driver.h>
#include <net/tls.h>
#include "fpga/core.h"
struct mlx5_fpga_tls {
struct list_head pending_cmds;
spinlock_t pending_cmds_lock; /* Protects pending_cmds */
u32 caps;
struct mlx5_fpga_conn *conn;
struct idr tx_idr;
struct idr rx_idr;
spinlock_t tx_idr_spinlock; /* protects the IDR */
spinlock_t rx_idr_spinlock; /* protects the IDR */
};
int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx);
void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
gfp_t flags, bool direction_sx);
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
{
return mdev->fpga->tls->caps;
}
int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
u32 seq, __be64 rcd_sn);
#endif /* __MLX5_FPGA_TLS_H__ */


@ -878,9 +878,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
-#ifdef CONFIG_MLX5_IPSEC
case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
-#endif
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;


@ -40,8 +40,6 @@
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@ -188,24 +186,18 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
-#ifdef CONFIG_MLX5_IPSEC
.ar_size = 2,
-#else
-.ar_size = 1,
-#endif
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
-#ifdef CONFIG_MLX5_IPSEC
ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
KERNEL_TX_IPSEC_NUM_LEVELS))),
-#endif
}
};
@ -2519,10 +2511,6 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
-if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
-(table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
-cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
/* Create the root namespace */
root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
@ -3172,8 +3160,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
-if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
-MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;


@ -35,7 +35,6 @@
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"
-#include "accel/tls.h"
enum {
MCQS_IDENTIFIER_BOOT_IMG = 0x1,
@ -249,7 +248,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
-if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) {
+if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err)
return err;


@ -62,9 +62,7 @@
#include "lib/mlx5.h"
#include "lib/tout.h"
#include "fpga/core.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec_offload.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
@ -1183,14 +1181,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start;
}
-mlx5_accel_ipsec_init(dev);
-err = mlx5_accel_tls_init(dev);
-if (err) {
-mlx5_core_err(dev, "TLS device start failed %d\n", err);
-goto err_tls_start;
-}
err = mlx5_init_fs(dev);
if (err) {
mlx5_core_err(dev, "Failed to init flow steering\n");
@ -1238,9 +1228,6 @@ err_vhca:
err_set_hca:
mlx5_cleanup_fs(dev);
err_fs:
-mlx5_accel_tls_cleanup(dev);
-err_tls_start:
-mlx5_accel_ipsec_cleanup(dev);
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
@ -1266,8 +1253,6 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
-mlx5_accel_ipsec_cleanup(dev);
-mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
@ -1947,7 +1932,6 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
-mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);


@ -87,6 +87,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
enable_vfs_hca:
num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
for (vf = 0; vf < num_vfs; vf++) {
+/* Notify the VF before its enablement to let it set
+ * some stuff.
+ */
+blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
+MLX5_PF_NOTIFY_ENABLE_VF, dev);
err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
@ -127,6 +132,11 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
for (vf = num_vfs - 1; vf >= 0; vf--) {
if (!sriov->vfs_ctx[vf].enabled)
continue;
+/* Notify the VF before its disablement to let it clean
+ * some resources.
+ */
+blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
+MLX5_PF_NOTIFY_DISABLE_VF, dev);
err = mlx5_core_disable_hca(dev, vf + 1);
if (err) {
mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
@ -257,7 +267,7 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
struct pci_dev *pdev = dev->pdev;
-int total_vfs;
+int total_vfs, i;
if (!mlx5_core_is_pf(dev))
return 0;
@ -269,6 +279,9 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
if (!sriov->vfs_ctx)
return -ENOMEM;
+for (i = 0; i < total_vfs; i++)
+BLOCKING_INIT_NOTIFIER_HEAD(&sriov->vfs_ctx[i].notifier);
return 0;
}
@ -281,3 +294,53 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
kfree(sriov->vfs_ctx);
}
+/**
+ * mlx5_sriov_blocking_notifier_unregister - Unregister a VF from
+ * a notification block chain.
+ *
+ * @mdev: The mlx5 core device.
+ * @vf_id: The VF id.
+ * @nb: The notifier block to be unregistered.
+ */
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+int vf_id,
+struct notifier_block *nb)
+{
+struct mlx5_vf_context *vfs_ctx;
+struct mlx5_core_sriov *sriov;
+sriov = &mdev->priv.sriov;
+if (WARN_ON(vf_id < 0 || vf_id >= sriov->num_vfs))
+return;
+vfs_ctx = &sriov->vfs_ctx[vf_id];
+blocking_notifier_chain_unregister(&vfs_ctx->notifier, nb);
+}
+EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_unregister);
+/**
+ * mlx5_sriov_blocking_notifier_register - Register a VF notification
+ * block chain.
+ *
+ * @mdev: The mlx5 core device.
+ * @vf_id: The VF id.
+ * @nb: The notifier block to be called upon the VF events.
+ *
+ * Returns 0 on success or an error code.
+ */
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+int vf_id,
+struct notifier_block *nb)
+{
+struct mlx5_vf_context *vfs_ctx;
+struct mlx5_core_sriov *sriov;
+sriov = &mdev->priv.sriov;
+if (vf_id < 0 || vf_id >= sriov->num_vfs)
+return -EINVAL;
+vfs_ctx = &sriov->vfs_ctx[vf_id];
+return blocking_notifier_chain_register(&vfs_ctx->notifier, nb);
+}
+EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_register);

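The two exported helpers above are the whole contract a VF-bound driver needs to track the PF command interface. A minimal consumer sketch follows; it is not part of this series, and the handler and notifier-block names (my_vf_event, my_nb) are purely illustrative:

/* Illustrative consumer of the PF->VF notifier chain. */
static int my_vf_event(struct notifier_block *nb, unsigned long event, void *data)
{
	switch (event) {
	case MLX5_PF_NOTIFY_ENABLE_VF:
		/* PF command interface is usable for this VF again */
		break;
	case MLX5_PF_NOTIFY_DISABLE_VF:
		/* stop issuing commands; the PF interface is going down */
		break;
	}
	return 0;
}

static struct notifier_block my_nb = { .notifier_call = my_vf_event };

/* vf_id is the zero-based VF index, e.g. from pci_iov_vf_id(pdev):
 *	mlx5_sriov_blocking_notifier_register(mdev, vf_id, &my_nb);
 *	...
 *	mlx5_sriov_blocking_notifier_unregister(mdev, vf_id, &my_nb);
 */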

@@ -5,89 +5,157 @@
 #include "cmd.h"
 
-int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
+static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
+                                  u16 *vhca_id);
+
+int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
 {
-        struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
         u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
         u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
-        int ret;
 
-        if (!mdev)
+        lockdep_assert_held(&mvdev->state_mutex);
+        if (mvdev->mdev_detach)
                 return -ENOTCONN;
 
         MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
-        MLX5_SET(suspend_vhca_in, in, vhca_id, vhca_id);
+        MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id);
         MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);
 
-        ret = mlx5_cmd_exec_inout(mdev, suspend_vhca, in, out);
-        mlx5_vf_put_core_dev(mdev);
-        return ret;
+        return mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
 }
 
-int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
+int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
 {
-        struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
         u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
         u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};
-        int ret;
 
-        if (!mdev)
+        lockdep_assert_held(&mvdev->state_mutex);
+        if (mvdev->mdev_detach)
                 return -ENOTCONN;
 
         MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
-        MLX5_SET(resume_vhca_in, in, vhca_id, vhca_id);
+        MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id);
         MLX5_SET(resume_vhca_in, in, op_mod, op_mod);
 
-        ret = mlx5_cmd_exec_inout(mdev, resume_vhca, in, out);
-        mlx5_vf_put_core_dev(mdev);
-        return ret;
+        return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out);
 }
 
-int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
                                           size_t *state_size)
 {
-        struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
         u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
         u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
         int ret;
 
-        if (!mdev)
+        lockdep_assert_held(&mvdev->state_mutex);
+        if (mvdev->mdev_detach)
                 return -ENOTCONN;
 
         MLX5_SET(query_vhca_migration_state_in, in, opcode,
                  MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
-        MLX5_SET(query_vhca_migration_state_in, in, vhca_id, vhca_id);
+        MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
         MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0);
 
-        ret = mlx5_cmd_exec_inout(mdev, query_vhca_migration_state, in, out);
+        ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in,
+                                  out);
         if (ret)
-                goto end;
+                return ret;
 
         *state_size = MLX5_GET(query_vhca_migration_state_out, out,
                                required_umem_size);
-
-end:
-        mlx5_vf_put_core_dev(mdev);
-        return ret;
+        return 0;
 }
 
-int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id)
+static int mlx5fv_vf_event(struct notifier_block *nb,
+                           unsigned long event, void *data)
+{
+        struct mlx5vf_pci_core_device *mvdev =
+                container_of(nb, struct mlx5vf_pci_core_device, nb);
+
+        mutex_lock(&mvdev->state_mutex);
+        switch (event) {
+        case MLX5_PF_NOTIFY_ENABLE_VF:
+                mvdev->mdev_detach = false;
+                break;
+        case MLX5_PF_NOTIFY_DISABLE_VF:
+                mlx5vf_disable_fds(mvdev);
+                mvdev->mdev_detach = true;
+                break;
+        default:
+                break;
+        }
+        mlx5vf_state_mutex_unlock(mvdev);
+        return 0;
+}
+
+void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+        if (!mvdev->migrate_cap)
+                return;
+
+        mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
+                                                &mvdev->nb);
+        destroy_workqueue(mvdev->cb_wq);
+}
+
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+        struct pci_dev *pdev = mvdev->core_device.pdev;
+        int ret;
+
+        if (!pdev->is_virtfn)
+                return;
+
+        mvdev->mdev = mlx5_vf_get_core_dev(pdev);
+        if (!mvdev->mdev)
+                return;
+
+        if (!MLX5_CAP_GEN(mvdev->mdev, migration))
+                goto end;
+
+        mvdev->vf_id = pci_iov_vf_id(pdev);
+        if (mvdev->vf_id < 0)
+                goto end;
+
+        if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
+                                   &mvdev->vhca_id))
+                goto end;
+
+        mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
+        if (!mvdev->cb_wq)
+                goto end;
+
+        mutex_init(&mvdev->state_mutex);
+        spin_lock_init(&mvdev->reset_lock);
+        mvdev->nb.notifier_call = mlx5fv_vf_event;
+        ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
+                                                    &mvdev->nb);
+        if (ret) {
+                destroy_workqueue(mvdev->cb_wq);
+                goto end;
+        }
+
+        mvdev->migrate_cap = 1;
+        mvdev->core_device.vdev.migration_flags =
+                VFIO_MIGRATION_STOP_COPY |
+                VFIO_MIGRATION_P2P;
+
+end:
+        mlx5_vf_put_core_dev(mvdev->mdev);
+}
+
+static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
+                                  u16 *vhca_id)
 {
-        struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
         u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
         int out_size;
         void *out;
         int ret;
 
-        if (!mdev)
-                return -ENOTCONN;
-
         out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
         out = kzalloc(out_size, GFP_KERNEL);
-        if (!out) {
-                ret = -ENOMEM;
-                goto end;
-        }
+        if (!out)
+                return -ENOMEM;
 
         MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
         MLX5_SET(query_hca_cap_in, in, other_function, 1);
@@ -105,8 +173,6 @@ int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id)
 err_exec:
         kfree(out);
-end:
-        mlx5_vf_put_core_dev(mdev);
         return ret;
 }
@@ -151,21 +217,68 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
         return err;
 }
 
-int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work)
+{
+        struct mlx5vf_async_data *async_data = container_of(_work,
+                struct mlx5vf_async_data, work);
+        struct mlx5_vf_migration_file *migf = container_of(async_data,
+                struct mlx5_vf_migration_file, async_data);
+        struct mlx5_core_dev *mdev = migf->mvdev->mdev;
+
+        mutex_lock(&migf->lock);
+        if (async_data->status) {
+                migf->is_err = true;
+                wake_up_interruptible(&migf->poll_wait);
+        }
+        mutex_unlock(&migf->lock);
+
+        mlx5_core_destroy_mkey(mdev, async_data->mkey);
+        dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
+        mlx5_core_dealloc_pd(mdev, async_data->pdn);
+        kvfree(async_data->out);
+        fput(migf->filp);
+}
+
+static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
+{
+        struct mlx5vf_async_data *async_data = container_of(context,
+                        struct mlx5vf_async_data, cb_work);
+        struct mlx5_vf_migration_file *migf = container_of(async_data,
+                        struct mlx5_vf_migration_file, async_data);
+
+        if (!status) {
+                WRITE_ONCE(migf->total_length,
+                           MLX5_GET(save_vhca_state_out, async_data->out,
+                                    actual_image_size));
+                wake_up_interruptible(&migf->poll_wait);
+        }
+
+        /*
+         * The error and the cleanup flows can't run from an
+         * interrupt context
+         */
+        async_data->status = status;
+        queue_work(migf->mvdev->cb_wq, &async_data->work);
+}
+
+int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                                struct mlx5_vf_migration_file *migf)
 {
-        struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
-        u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
+        u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out);
         u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
+        struct mlx5vf_async_data *async_data;
+        struct mlx5_core_dev *mdev;
         u32 pdn, mkey;
         int err;
 
-        if (!mdev)
+        lockdep_assert_held(&mvdev->state_mutex);
+        if (mvdev->mdev_detach)
                 return -ENOTCONN;
 
+        mdev = mvdev->mdev;
         err = mlx5_core_alloc_pd(mdev, &pdn);
         if (err)
-                goto end;
+                return err;
 
         err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE,
                               0);
@@ -179,45 +292,54 @@ int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
         MLX5_SET(save_vhca_state_in, in, opcode,
                  MLX5_CMD_OP_SAVE_VHCA_STATE);
         MLX5_SET(save_vhca_state_in, in, op_mod, 0);
-        MLX5_SET(save_vhca_state_in, in, vhca_id, vhca_id);
+        MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
         MLX5_SET(save_vhca_state_in, in, mkey, mkey);
         MLX5_SET(save_vhca_state_in, in, size, migf->total_length);
 
-        err = mlx5_cmd_exec_inout(mdev, save_vhca_state, in, out);
+        async_data = &migf->async_data;
+        async_data->out = kvzalloc(out_size, GFP_KERNEL);
+        if (!async_data->out) {
+                err = -ENOMEM;
+                goto err_out;
+        }
+
+        /* no data exists till the callback comes back */
+        migf->total_length = 0;
+        get_file(migf->filp);
+        async_data->mkey = mkey;
+        async_data->pdn = pdn;
+        err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in),
+                               async_data->out,
+                               out_size, mlx5vf_save_callback,
+                               &async_data->cb_work);
         if (err)
                 goto err_exec;
 
-        migf->total_length =
-                MLX5_GET(save_vhca_state_out, out, actual_image_size);
-        mlx5_core_destroy_mkey(mdev, mkey);
-        mlx5_core_dealloc_pd(mdev, pdn);
-        dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
-        mlx5_vf_put_core_dev(mdev);
         return 0;
 
 err_exec:
+        fput(migf->filp);
+        kvfree(async_data->out);
+err_out:
         mlx5_core_destroy_mkey(mdev, mkey);
 err_create_mkey:
         dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
 err_dma_map:
         mlx5_core_dealloc_pd(mdev, pdn);
-end:
-        mlx5_vf_put_core_dev(mdev);
         return err;
 }
 
-int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                                struct mlx5_vf_migration_file *migf)
 {
-        struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+        struct mlx5_core_dev *mdev;
         u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
         u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
         u32 pdn, mkey;
         int err;
 
-        if (!mdev)
+        lockdep_assert_held(&mvdev->state_mutex);
+        if (mvdev->mdev_detach)
                 return -ENOTCONN;
 
         mutex_lock(&migf->lock);
@@ -226,6 +348,7 @@ int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
                 goto end;
         }
 
+        mdev = mvdev->mdev;
         err = mlx5_core_alloc_pd(mdev, &pdn);
         if (err)
                 goto end;
@@ -241,7 +364,7 @@ int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
         MLX5_SET(load_vhca_state_in, in, opcode,
                  MLX5_CMD_OP_LOAD_VHCA_STATE);
         MLX5_SET(load_vhca_state_in, in, op_mod, 0);
-        MLX5_SET(load_vhca_state_in, in, vhca_id, vhca_id);
+        MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id);
         MLX5_SET(load_vhca_state_in, in, mkey, mkey);
         MLX5_SET(load_vhca_state_in, in, size, migf->total_length);
@@ -253,7 +376,6 @@ err_mkey:
 err_reg:
         mlx5_core_dealloc_pd(mdev, pdn);
 end:
-        mlx5_vf_put_core_dev(mdev);
         mutex_unlock(&migf->lock);
         return err;
 }


@@ -7,12 +7,23 @@
 #define MLX5_VFIO_CMD_H
 
 #include <linux/kernel.h>
+#include <linux/vfio_pci_core.h>
 #include <linux/mlx5/driver.h>
 
+struct mlx5vf_async_data {
+        struct mlx5_async_work cb_work;
+        struct work_struct work;
+        int status;
+        u32 pdn;
+        u32 mkey;
+        void *out;
+};
+
 struct mlx5_vf_migration_file {
         struct file *filp;
         struct mutex lock;
-        bool disabled;
+        u8 disabled:1;
+        u8 is_err:1;
 
         struct sg_append_table table;
         size_t total_length;
@@ -22,15 +33,42 @@ struct mlx5_vf_migration_file {
         struct scatterlist *last_offset_sg;
         unsigned int sg_last_entry;
         unsigned long last_offset;
+        struct mlx5vf_pci_core_device *mvdev;
+        wait_queue_head_t poll_wait;
+        struct mlx5_async_ctx async_ctx;
+        struct mlx5vf_async_data async_data;
 };
 
-int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod);
-int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod);
-int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
+struct mlx5vf_pci_core_device {
+        struct vfio_pci_core_device core_device;
+        int vf_id;
+        u16 vhca_id;
+        u8 migrate_cap:1;
+        u8 deferred_reset:1;
+        u8 mdev_detach:1;
+        /* protect migration state */
+        struct mutex state_mutex;
+        enum vfio_device_mig_state mig_state;
+        /* protect the reset_done flow */
+        spinlock_t reset_lock;
+        struct mlx5_vf_migration_file *resuming_migf;
+        struct mlx5_vf_migration_file *saving_migf;
+        struct workqueue_struct *cb_wq;
+        struct notifier_block nb;
+        struct mlx5_core_dev *mdev;
+};
+
+int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
+int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
+int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
                                           size_t *state_size);
-int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id);
-int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
+int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                                struct mlx5_vf_migration_file *migf);
-int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                                struct mlx5_vf_migration_file *migf);
+void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
 #endif /* MLX5_VFIO_CMD_H */


@@ -17,7 +17,6 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/sched/mm.h>
-#include <linux/vfio_pci_core.h>
 #include <linux/anon_inodes.h>
 
 #include "cmd.h"
@@ -25,20 +24,6 @@
 /* Arbitrary to prevent userspace from consuming endless memory */
 #define MAX_MIGRATION_SIZE (512*1024*1024)
 
-struct mlx5vf_pci_core_device {
-        struct vfio_pci_core_device core_device;
-        u16 vhca_id;
-        u8 migrate_cap:1;
-        u8 deferred_reset:1;
-        /* protect migration state */
-        struct mutex state_mutex;
-        enum vfio_device_mig_state mig_state;
-        /* protect the reset_done flow */
-        spinlock_t reset_lock;
-        struct mlx5_vf_migration_file *resuming_migf;
-        struct mlx5_vf_migration_file *saving_migf;
-};
-
 static struct page *
 mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf,
                           unsigned long offset)
@@ -149,12 +134,22 @@ static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
                 return -ESPIPE;
         pos = &filp->f_pos;
 
+        if (!(filp->f_flags & O_NONBLOCK)) {
+                if (wait_event_interruptible(migf->poll_wait,
+                             READ_ONCE(migf->total_length) || migf->is_err))
+                        return -ERESTARTSYS;
+        }
+
         mutex_lock(&migf->lock);
+        if ((filp->f_flags & O_NONBLOCK) && !READ_ONCE(migf->total_length)) {
+                done = -EAGAIN;
+                goto out_unlock;
+        }
         if (*pos > migf->total_length) {
                 done = -EINVAL;
                 goto out_unlock;
         }
-        if (migf->disabled) {
+        if (migf->disabled || migf->is_err) {
                 done = -ENODEV;
                 goto out_unlock;
         }
@@ -194,9 +189,28 @@ out_unlock:
         return done;
 }
 
+static __poll_t mlx5vf_save_poll(struct file *filp,
+                                 struct poll_table_struct *wait)
+{
+        struct mlx5_vf_migration_file *migf = filp->private_data;
+        __poll_t pollflags = 0;
+
+        poll_wait(filp, &migf->poll_wait, wait);
+
+        mutex_lock(&migf->lock);
+        if (migf->disabled || migf->is_err)
+                pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+        else if (READ_ONCE(migf->total_length))
+                pollflags = EPOLLIN | EPOLLRDNORM;
+        mutex_unlock(&migf->lock);
+
+        return pollflags;
+}
+
 static const struct file_operations mlx5vf_save_fops = {
         .owner = THIS_MODULE,
         .read = mlx5vf_save_read,
+        .poll = mlx5vf_save_poll,
         .release = mlx5vf_release_file,
         .llseek = no_llseek,
 };
@@ -222,9 +236,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
 
         stream_open(migf->filp->f_inode, migf->filp);
         mutex_init(&migf->lock);
+        init_waitqueue_head(&migf->poll_wait);
+        mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
+        INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
-        ret = mlx5vf_cmd_query_vhca_migration_state(
-                mvdev->core_device.pdev, mvdev->vhca_id, &migf->total_length);
+        ret = mlx5vf_cmd_query_vhca_migration_state(mvdev,
+                                                    &migf->total_length);
         if (ret)
                 goto out_free;
@@ -233,8 +249,8 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
         if (ret)
                 goto out_free;
 
-        ret = mlx5vf_cmd_save_vhca_state(mvdev->core_device.pdev,
-                                         mvdev->vhca_id, migf);
+        migf->mvdev = mvdev;
+        ret = mlx5vf_cmd_save_vhca_state(mvdev, migf);
         if (ret)
                 goto out_free;
         return migf;
@@ -339,7 +355,7 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
         return migf;
 }
 
-static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
 {
         if (mvdev->resuming_migf) {
                 mlx5vf_disable_fd(mvdev->resuming_migf);
@@ -347,6 +363,8 @@ static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
                 mvdev->resuming_migf = NULL;
         }
         if (mvdev->saving_migf) {
+                mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
+                cancel_work_sync(&mvdev->saving_migf->async_data.work);
                 mlx5vf_disable_fd(mvdev->saving_migf);
                 fput(mvdev->saving_migf->filp);
                 mvdev->saving_migf = NULL;
@@ -361,8 +379,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
         int ret;
 
         if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
-                ret = mlx5vf_cmd_suspend_vhca(
-                        mvdev->core_device.pdev, mvdev->vhca_id,
+                ret = mlx5vf_cmd_suspend_vhca(mvdev,
                         MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
                 if (ret)
                         return ERR_PTR(ret);
@@ -370,8 +387,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
         }
 
         if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
-                ret = mlx5vf_cmd_resume_vhca(
-                        mvdev->core_device.pdev, mvdev->vhca_id,
+                ret = mlx5vf_cmd_resume_vhca(mvdev,
                         MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER);
                 if (ret)
                         return ERR_PTR(ret);
@@ -379,8 +395,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
         }
 
         if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
-                ret = mlx5vf_cmd_suspend_vhca(
-                        mvdev->core_device.pdev, mvdev->vhca_id,
+                ret = mlx5vf_cmd_suspend_vhca(mvdev,
                         MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR);
                 if (ret)
                         return ERR_PTR(ret);
@@ -388,8 +403,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
         }
 
         if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
-                ret = mlx5vf_cmd_resume_vhca(
-                        mvdev->core_device.pdev, mvdev->vhca_id,
+                ret = mlx5vf_cmd_resume_vhca(mvdev,
                         MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR);
                 if (ret)
                         return ERR_PTR(ret);
@@ -424,8 +438,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
         }
 
         if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
-                ret = mlx5vf_cmd_load_vhca_state(mvdev->core_device.pdev,
-                                                 mvdev->vhca_id,
+                ret = mlx5vf_cmd_load_vhca_state(mvdev,
                                                  mvdev->resuming_migf);
                 if (ret)
                         return ERR_PTR(ret);
@@ -444,7 +457,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
  * This function is called in all state_mutex unlock cases to
  * handle a 'deferred_reset' if exists.
  */
-static void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
 {
 again:
         spin_lock(&mvdev->reset_lock);
@@ -532,34 +545,16 @@ static int mlx5vf_pci_open_device(struct vfio_device *core_vdev)
         struct mlx5vf_pci_core_device *mvdev = container_of(
                 core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
         struct vfio_pci_core_device *vdev = &mvdev->core_device;
-        int vf_id;
         int ret;
 
         ret = vfio_pci_core_enable(vdev);
         if (ret)
                 return ret;
 
-        if (!mvdev->migrate_cap) {
-                vfio_pci_core_finish_enable(vdev);
-                return 0;
-        }
-
-        vf_id = pci_iov_vf_id(vdev->pdev);
-        if (vf_id < 0) {
-                ret = vf_id;
-                goto out_disable;
-        }
-
-        ret = mlx5vf_cmd_get_vhca_id(vdev->pdev, vf_id + 1, &mvdev->vhca_id);
-        if (ret)
-                goto out_disable;
-
-        mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+        if (mvdev->migrate_cap)
+                mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
         vfio_pci_core_finish_enable(vdev);
         return 0;
-
-out_disable:
-        vfio_pci_core_disable(vdev);
-        return ret;
 }
 
 static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
@@ -596,24 +591,7 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
         if (!mvdev)
                 return -ENOMEM;
         vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
-
-        if (pdev->is_virtfn) {
-                struct mlx5_core_dev *mdev =
-                        mlx5_vf_get_core_dev(pdev);
-
-                if (mdev) {
-                        if (MLX5_CAP_GEN(mdev, migration)) {
-                                mvdev->migrate_cap = 1;
-                                mvdev->core_device.vdev.migration_flags =
-                                        VFIO_MIGRATION_STOP_COPY |
-                                        VFIO_MIGRATION_P2P;
-                                mutex_init(&mvdev->state_mutex);
-                                spin_lock_init(&mvdev->reset_lock);
-                        }
-                        mlx5_vf_put_core_dev(mdev);
-                }
-        }
-
+        mlx5vf_cmd_set_migratable(mvdev);
         ret = vfio_pci_core_register_device(&mvdev->core_device);
         if (ret)
                 goto out_free;
@@ -622,6 +600,7 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
         return 0;
 
 out_free:
+        mlx5vf_cmd_remove_migratable(mvdev);
         vfio_pci_core_uninit_device(&mvdev->core_device);
         kfree(mvdev);
         return ret;
@@ -632,6 +611,7 @@ static void mlx5vf_pci_remove(struct pci_dev *pdev)
         struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
 
         vfio_pci_core_unregister_device(&mvdev->core_device);
+        mlx5vf_cmd_remove_migratable(mvdev);
         vfio_pci_core_uninit_device(&mvdev->core_device);
         kfree(mvdev);
 }


@@ -111,46 +111,43 @@ struct mlx5_accel_esp_xfrm {
         struct mlx5_accel_esp_xfrm_attrs attrs;
 };
 
-enum {
-        MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
-};
-
 enum mlx5_accel_ipsec_cap {
         MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
-        MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
-        MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
-        MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
-        MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
-        MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
-        MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
-        MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
+        MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 1,
+        MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 2,
+        MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 3,
+        MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 4,
 };
 
-#ifdef CONFIG_MLX5_ACCEL
+#ifdef CONFIG_MLX5_EN_IPSEC
 
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
 
 struct mlx5_accel_esp_xfrm *
 mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
-                           const struct mlx5_accel_esp_xfrm_attrs *attrs,
-                           u32 flags);
+                           const struct mlx5_accel_esp_xfrm_attrs *attrs);
 void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
 int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
                                const struct mlx5_accel_esp_xfrm_attrs *attrs);
 #else
-static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
+static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+        return 0;
+}
 
 static inline struct mlx5_accel_esp_xfrm *
 mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
-                           const struct mlx5_accel_esp_xfrm_attrs *attrs,
-                           u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
+                           const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+        return ERR_PTR(-EOPNOTSUPP);
+}
 
 static inline void
 mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
 static inline int
 mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
                            const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
-#endif /* CONFIG_MLX5_ACCEL */
+#endif /* CONFIG_MLX5_EN_IPSEC */
 #endif /* __MLX5_ACCEL_H__ */


@@ -445,6 +445,11 @@ struct mlx5_qp_table {
         struct radix_tree_root tree;
 };
 
+enum {
+        MLX5_PF_NOTIFY_DISABLE_VF,
+        MLX5_PF_NOTIFY_ENABLE_VF,
+};
+
 struct mlx5_vf_context {
         int enabled;
         u64 port_guid;
@@ -455,6 +460,7 @@ struct mlx5_vf_context {
         u8 port_guid_valid:1;
         u8 node_guid_valid:1;
         enum port_state_policy policy;
+        struct blocking_notifier_head notifier;
 };
 
 struct mlx5_core_sriov {
@@ -777,9 +783,6 @@ struct mlx5_core_dev {
         } roce;
 #ifdef CONFIG_MLX5_FPGA
         struct mlx5_fpga_device *fpga;
-#endif
-#ifdef CONFIG_MLX5_ACCEL
-        const struct mlx5_accel_ipsec_ops *ipsec_ops;
 #endif
         struct mlx5_clock clock;
         struct mlx5_ib_clock_info *clock_info;
@@ -1155,6 +1158,12 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
 struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
 void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
 
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+                                          int vf_id,
+                                          struct notifier_block *nb);
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+                                             int vf_id,
+                                             struct notifier_block *nb);
 #ifdef CONFIG_MLX5_CORE_IPOIB
 struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
                                           struct ib_device *ibdev,


@@ -54,7 +54,6 @@ enum {
 
 enum {
         MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
-        MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3,
 };
 
 struct mlx5_ifc_fpga_shell_caps_bits {
@@ -387,89 +386,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
         u8 reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_tls_extended_cap_bits {
-        u8 aes_gcm_128[0x1];
-        u8 aes_gcm_256[0x1];
-        u8 reserved_at_2[0x1e];
-        u8 reserved_at_20[0x20];
-        u8 context_capacity_total[0x20];
-        u8 context_capacity_rx[0x20];
-        u8 context_capacity_tx[0x20];
-        u8 reserved_at_a0[0x10];
-        u8 tls_counter_size[0x10];
-        u8 tls_counters_addr_low[0x20];
-        u8 tls_counters_addr_high[0x20];
-        u8 rx[0x1];
-        u8 tx[0x1];
-        u8 tls_v12[0x1];
-        u8 tls_v13[0x1];
-        u8 lro[0x1];
-        u8 ipv6[0x1];
-        u8 reserved_at_106[0x1a];
-};
-
-struct mlx5_ifc_ipsec_extended_cap_bits {
-        u8 encapsulation[0x20];
-        u8 reserved_0[0x12];
-        u8 v2_command[0x1];
-        u8 udp_encap[0x1];
-        u8 rx_no_trailer[0x1];
-        u8 ipv4_fragment[0x1];
-        u8 ipv6[0x1];
-        u8 esn[0x1];
-        u8 lso[0x1];
-        u8 transport_and_tunnel_mode[0x1];
-        u8 tunnel_mode[0x1];
-        u8 transport_mode[0x1];
-        u8 ah_esp[0x1];
-        u8 esp[0x1];
-        u8 ah[0x1];
-        u8 ipv4_options[0x1];
-        u8 auth_alg[0x20];
-        u8 enc_alg[0x20];
-        u8 sa_cap[0x20];
-        u8 reserved_1[0x10];
-        u8 number_of_ipsec_counters[0x10];
-        u8 ipsec_counters_addr_low[0x20];
-        u8 ipsec_counters_addr_high[0x20];
-};
-
-struct mlx5_ifc_ipsec_counters_bits {
-        u8 dec_in_packets[0x40];
-        u8 dec_out_packets[0x40];
-        u8 dec_bypass_packets[0x40];
-        u8 enc_in_packets[0x40];
-        u8 enc_out_packets[0x40];
-        u8 enc_bypass_packets[0x40];
-        u8 drop_dec_packets[0x40];
-        u8 failed_auth_dec_packets[0x40];
-        u8 drop_enc_packets[0x40];
-        u8 success_add_sa[0x40];
-        u8 fail_add_sa[0x40];
-        u8 success_delete_sa[0x40];
-        u8 fail_delete_sa[0x40];
-        u8 dropped_cmd[0x40];
-};
-
 enum {
         MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
         MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
@@ -486,131 +402,4 @@ struct mlx5_ifc_fpga_qp_error_event_bits {
         u8 reserved_at_c0[0x8];
         u8 fpga_qpn[0x18];
 };
-
-enum mlx5_ifc_fpga_ipsec_response_syndrome {
-        MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
-        MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
-        MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
-        MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_resp {
-        __be32 syndrome;
-        union {
-                __be32 sw_sa_handle;
-                __be32 flags;
-        };
-        u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_cmd_opcode {
-        MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
-        MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
-        MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
-        MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
-        MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
-        MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
-};
-
-enum mlx5_ifc_fpga_ipsec_cap {
-        MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_cap {
-        __be32 cmd;
-        __be32 flags;
-        u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_sa_flags {
-        MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
-        MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
-        MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
-        MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
-        MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
-        MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
-        MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
-        MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
-};
-
-enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
-        MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
-        MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
-        MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_sa_v1 {
-        __be32 cmd;
-        u8 key_enc[32];
-        u8 key_auth[32];
-        __be32 sip[4];
-        __be32 dip[4];
-        union {
-                struct {
-                        __be32 reserved;
-                        u8 salt_iv[8];
-                        __be32 salt;
-                } __packed gcm;
-                struct {
-                        u8 salt[16];
-                } __packed cbc;
-        };
-        __be32 spi;
-        __be32 sw_sa_handle;
-        __be16 tfclen;
-        u8 enc_mode;
-        u8 reserved1[2];
-        u8 flags;
-        u8 reserved2[2];
-};
-
-struct mlx5_ifc_fpga_ipsec_sa {
-        struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
-        __be16 udp_sp;
-        __be16 udp_dp;
-        u8 reserved1[4];
-        __be32 esn;
-        __be16 vid; /* only 12 bits, rest is reserved */
-        __be16 reserved2;
-} __packed;
-
-enum fpga_tls_cmds {
-        CMD_SETUP_STREAM = 0x1001,
-        CMD_TEARDOWN_STREAM = 0x1002,
-        CMD_RESYNC_RX = 0x1003,
-};
-
-#define MLX5_TLS_1_2 (0)
-
-#define MLX5_TLS_ALG_AES_GCM_128 (0)
-#define MLX5_TLS_ALG_AES_GCM_256 (1)
-
-struct mlx5_ifc_tls_cmd_bits {
-        u8 command_type[0x20];
-        u8 ipv6[0x1];
-        u8 direction_sx[0x1];
-        u8 tls_version[0x2];
-        u8 reserved[0x1c];
-        u8 swid[0x20];
-        u8 src_port[0x10];
-        u8 dst_port[0x10];
-        union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
-        union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
-        u8 tls_rcd_sn[0x40];
-        u8 tcp_sn[0x20];
-        u8 tls_implicit_iv[0x20];
-        u8 tls_xor_iv[0x40];
-        u8 encryption_key[0x100];
-        u8 alg[4];
-        u8 reserved2[0x1c];
-        u8 reserved3[0x4a0];
-};
-
-struct mlx5_ifc_tls_resp_bits {
-        u8 syndrome[0x20];
-        u8 stream_id[0x20];
-        u8 reserved[0x40];
-};
-
-#define MLX5_TLS_COMMAND_SIZE (0x100)
-
 #endif /* MLX5_IFC_FPGA_H */


@@ -141,7 +141,7 @@ enum mlx5_ptys_width {
         MLX5_PTYS_WIDTH_12X = 1 << 4,
 };
 
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+#define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
 #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
         (ext ? MLX5_GET(reg, out, ext_##field) : \
         MLX5_GET(reg, out, field))


@@ -2497,15 +2497,7 @@ struct ib_device_ops {
                         struct ib_flow_attr *flow_attr,
                         struct ib_udata *udata);
         int (*destroy_flow)(struct ib_flow *flow_id);
-        struct ib_flow_action *(*create_flow_action_esp)(
-                struct ib_device *device,
-                const struct ib_flow_action_attrs_esp *attr,
-                struct uverbs_attr_bundle *attrs);
         int (*destroy_flow_action)(struct ib_flow_action *action);
-        int (*modify_flow_action_esp)(
-                struct ib_flow_action *action,
-                const struct ib_flow_action_attrs_esp *attr,
-                struct uverbs_attr_bundle *attrs);
         int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
                                  int state);
         int (*get_vf_config)(struct ib_device *device, int vf, u32 port,