mlx5-fixes-2019-09-24
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl2J5AAACgkQSD+KveBX
+j7Jqwf7BWVC7ul3q19W2IkHjo+ts03eB43n+egQR8f0X5QJ9ClG167OA+JQd+9d
y/2rJ5tWtFU7oWJOweSWHJXlSIK8iRTEe0w7fFWCQ9P1ne7BsO9tC6nTJ/X7rfc+
UP/+iLcMi10JnNGdqk+dZs/nSNNw24fH4Drd1WC2YhMxZWVKeChxlSnez40I7Rdq
ij8SiL3sCxt+486IYENVXnpR849gO/K5XY5qasHh6QGQzFXAOlecp9zUXyDyEyL+
L6uDOTmzcnl6CHMgNfyEXqu+ZTo+rnycg6Yx0aaPCz92nMgo2zXwg/KoeT0zwJg5
mfaXvFlgM0BIGnAtETP44jTq4vKQow==
=0IpF
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2019-09-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-09-24

This series introduces some fixes to the mlx5 driver.
For more information please see the tag log below.

Please pull and let me know if there is any problem.

For -stable v4.20:
  ('net/mlx5e: Fix traffic duplication in ethtool steering')

For -stable v4.19:
  ('net/mlx5: Add device ID of upcoming BlueField-2')

For -stable v5.3:
  ('net/mlx5e: Fix matching on tunnel addresses type')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2dbf45d135
@@ -399,10 +399,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
                       struct mlx5_flow_table *ft,
                       struct ethtool_rx_flow_spec *fs)
 {
+        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
         struct mlx5_flow_destination *dst = NULL;
-        struct mlx5_flow_act flow_act = {0};
-        struct mlx5_flow_spec *spec;
         struct mlx5_flow_handle *rule;
+        struct mlx5_flow_spec *spec;
         int err = 0;
 
         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -1664,46 +1664,63 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                         return err;
         }
 
-        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-                struct flow_match_ipv4_addrs match;
+        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+                struct flow_match_control match;
+                u16 addr_type;
 
-                flow_rule_match_enc_ipv4_addrs(rule, &match);
-                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
-                         ntohl(match.mask->src));
-                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
-                         ntohl(match.key->src));
+                flow_rule_match_enc_control(rule, &match);
+                addr_type = match.key->addr_type;
 
-                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
-                         ntohl(match.mask->dst));
-                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
-                         ntohl(match.key->dst));
+                /* For tunnel addr_type used same key id`s as for non-tunnel */
+                if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+                        struct flow_match_ipv4_addrs match;
 
-                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
-        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
-                struct flow_match_ipv6_addrs match;
+                        flow_rule_match_enc_ipv4_addrs(rule, &match);
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+                                 src_ipv4_src_ipv6.ipv4_layout.ipv4,
+                                 ntohl(match.mask->src));
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                 src_ipv4_src_ipv6.ipv4_layout.ipv4,
+                                 ntohl(match.key->src));
 
-                flow_rule_match_enc_ipv6_addrs(rule, &match);
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
-                       &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
-                       &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+                                 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+                                 ntohl(match.mask->dst));
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+                                 ntohl(match.key->dst));
 
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-                       &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-                       &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+                        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+                                         ethertype);
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+                                 ETH_P_IP);
+                } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+                        struct flow_match_ipv6_addrs match;
 
-                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
+                        flow_rule_match_enc_ipv6_addrs(rule, &match);
+                        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                            src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                               &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+                                                                   ipv6));
+                        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                            src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                               &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+                                                                  ipv6));
+
+                        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                            dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                               &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+                                                                   ipv6));
+                        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                            dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                               &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+                                                                  ipv6));
+
+                        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+                                         ethertype);
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+                                 ETH_P_IPV6);
+                }
         }
 
         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
@@ -1568,6 +1568,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
         { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},  /* ConnectX Family mlx5Gen Virtual Function */
         { PCI_VDEVICE(MELLANOX, 0xa2d2) },                      /* BlueField integrated ConnectX-5 network controller */
         { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},  /* BlueField integrated ConnectX-5 network controller VF */
+        { PCI_VDEVICE(MELLANOX, 0xa2d6) },                      /* BlueField-2 integrated ConnectX-6 Dx network controller */
         { 0, }
 };
 
@@ -615,7 +615,7 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
          * that recalculates the CS and forwards to the vport.
          */
         ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn,
-                                                        dest_action->vport.num,
+                                                        dest_action->vport.caps->num,
                                                         final_icm_addr);
         if (ret) {
                 mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
@@ -744,7 +744,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                         dest_action = action;
                         if (rx_rule) {
                                 /* Loopback on WIRE vport is not supported */
-                                if (action->vport.num == WIRE_PORT)
+                                if (action->vport.caps->num == WIRE_PORT)
                                         goto out_invalid_arg;
 
                                 attr.final_icm_addr = action->vport.caps->icm_address_rx;
@@ -230,8 +230,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
             (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
              dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
                 ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
-                                                    &dmn->info.caps,
-                                                    inner, rx);
+                                                    dmn, inner, rx);
                 if (ret)
                         return ret;
         }
@@ -458,13 +457,11 @@ static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
 
         prev_matcher = NULL;
         if (next_matcher && !first)
-                prev_matcher = list_entry(next_matcher->matcher_list.prev,
-                                          struct mlx5dr_matcher,
-                                          matcher_list);
+                prev_matcher = list_prev_entry(next_matcher, matcher_list);
         else if (!first)
-                prev_matcher = list_entry(tbl->matcher_list.prev,
-                                          struct mlx5dr_matcher,
-                                          matcher_list);
+                prev_matcher = list_last_entry(&tbl->matcher_list,
+                                               struct mlx5dr_matcher,
+                                               matcher_list);
 
         if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
             dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
@@ -18,7 +18,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
         struct mlx5dr_ste *last_ste;
 
         /* The new entry will be inserted after the last */
-        last_ste = list_entry(miss_list->prev, struct mlx5dr_ste, miss_list_node);
+        last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
         WARN_ON(!last_ste);
 
         ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
@@ -429,12 +429,9 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
         struct mlx5dr_ste *prev_ste;
         u64 miss_addr;
 
-        prev_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->prev, struct mlx5dr_ste,
-                              miss_list_node);
-        if (!prev_ste) {
-                WARN_ON(true);
+        prev_ste = list_prev_entry(ste, miss_list_node);
+        if (WARN_ON(!prev_ste))
                 return;
-        }
 
         miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
         mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
@@ -461,8 +458,8 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
         struct mlx5dr_ste_htbl *stats_tbl;
         LIST_HEAD(send_ste_list);
 
-        first_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->next,
-                               struct mlx5dr_ste, miss_list_node);
+        first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
+                                     struct mlx5dr_ste, miss_list_node);
         stats_tbl = first_ste->htbl;
 
         /* Two options:
@@ -479,8 +476,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
                 if (last_ste == first_ste)
                         next_ste = NULL;
                 else
-                        next_ste = list_entry(ste->miss_list_node.next,
-                                              struct mlx5dr_ste, miss_list_node);
+                        next_ste = list_next_entry(ste, miss_list_node);
 
                 if (!next_ste) {
                         /* One and only entry in the list */
@@ -841,6 +837,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
         spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
 
         spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
+        spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
+                                                      source_eswitch_owner_vhca_id);
 
         spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
         spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
@@ -2254,11 +2252,18 @@ static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
 {
         struct mlx5dr_match_misc *misc_mask = &value->misc;
 
-        if (misc_mask->source_port != 0xffff)
+        /* Partial misc source_port is not supported */
+        if (misc_mask->source_port && misc_mask->source_port != 0xffff)
                 return -EINVAL;
 
+        /* Partial misc source_eswitch_owner_vhca_id is not supported */
+        if (misc_mask->source_eswitch_owner_vhca_id &&
+            misc_mask->source_eswitch_owner_vhca_id != 0xffff)
+                return -EINVAL;
+
         DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
         DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+        misc_mask->source_eswitch_owner_vhca_id = 0;
 
         return 0;
 }
@@ -2270,17 +2275,33 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
         struct mlx5dr_match_misc *misc = &value->misc;
         struct mlx5dr_cmd_vport_cap *vport_cap;
+        struct mlx5dr_domain *dmn = sb->dmn;
+        struct mlx5dr_cmd_caps *caps;
         u8 *tag = hw_ste->tag;
 
         DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
 
-        vport_cap = mlx5dr_get_vport_cap(sb->caps, misc->source_port);
+        if (sb->vhca_id_valid) {
+                /* Find port GVMI based on the eswitch_owner_vhca_id */
+                if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+                        caps = &dmn->info.caps;
+                else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
+                                           dmn->peer_dmn->info.caps.gvmi))
+                        caps = &dmn->peer_dmn->info.caps;
+                else
+                        return -EINVAL;
+        } else {
+                caps = &dmn->info.caps;
+        }
+
+        vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
         if (!vport_cap)
                 return -EINVAL;
 
         if (vport_cap->vport_gvmi)
                 MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
 
+        misc->source_eswitch_owner_vhca_id = 0;
         misc->source_port = 0;
 
         return 0;
@@ -2288,17 +2309,20 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 
 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
                                   struct mlx5dr_match_param *mask,
-                                  struct mlx5dr_cmd_caps *caps,
+                                  struct mlx5dr_domain *dmn,
                                   bool inner, bool rx)
 {
         int ret;
 
+        /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
+        sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
+
         ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
         if (ret)
                 return ret;
 
         sb->rx = rx;
-        sb->caps = caps;
+        sb->dmn = dmn;
         sb->inner = inner;
         sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
@@ -180,6 +180,8 @@ void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
 struct mlx5dr_ste_build {
         u8 inner:1;
         u8 rx:1;
+        u8 vhca_id_valid:1;
+        struct mlx5dr_domain *dmn;
         struct mlx5dr_cmd_caps *caps;
         u8 lu_type;
         u16 byte_mask;
@@ -331,7 +333,7 @@ void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
                                  bool inner, bool rx);
 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
                                   struct mlx5dr_match_param *mask,
-                                  struct mlx5dr_cmd_caps *caps,
+                                  struct mlx5dr_domain *dmn,
                                   bool inner, bool rx);
 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
 
@@ -453,7 +455,7 @@ struct mlx5dr_match_misc {
         u32 gre_c_present:1;
         /* Source port.;0xffff determines wire port */
         u32 source_port:16;
-        u32 reserved_auto2:16;
+        u32 source_eswitch_owner_vhca_id:16;
         /* VLAN ID of first VLAN tag the inner header of the incoming packet.
          * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
          */
@@ -745,7 +747,6 @@ struct mlx5dr_action {
                 struct {
                         struct mlx5dr_domain *dmn;
                         struct mlx5dr_cmd_vport_cap *caps;
-                        u32 num;
                 } vport;
                 struct {
                         u32 vlan_hdr; /* tpid_pcp_dei_vid */
@@ -282,7 +282,6 @@ enum {
         MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT   = 0x940,
         MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
         MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT   = 0x942,
-        MLX5_CMD_OP_SYNC_STEERING                 = 0xb00,
         MLX5_CMD_OP_FPGA_CREATE_QP                = 0x960,
         MLX5_CMD_OP_FPGA_MODIFY_QP                = 0x961,
         MLX5_CMD_OP_FPGA_QUERY_QP                 = 0x962,
@@ -296,6 +295,7 @@ enum {
         MLX5_CMD_OP_DESTROY_UCTX                  = 0xa06,
         MLX5_CMD_OP_CREATE_UMEM                   = 0xa08,
         MLX5_CMD_OP_DESTROY_UMEM                  = 0xa0a,
+        MLX5_CMD_OP_SYNC_STEERING                 = 0xb00,
         MLX5_CMD_OP_MAX
 };
 
@@ -487,7 +487,7 @@ union mlx5_ifc_gre_key_bits {
 
 struct mlx5_ifc_fte_match_set_misc_bits {
         u8         gre_c_present[0x1];
-        u8         reserved_auto1[0x1];
+        u8         reserved_at_1[0x1];
         u8         gre_k_present[0x1];
         u8         gre_s_present[0x1];
         u8         source_vhca_port[0x4];
@@ -5054,50 +5054,50 @@ struct mlx5_ifc_query_hca_cap_in_bits {
 
 struct mlx5_ifc_other_hca_cap_bits {
         u8         roce[0x1];
-        u8         reserved_0[0x27f];
+        u8         reserved_at_1[0x27f];
 };
 
 struct mlx5_ifc_query_other_hca_cap_out_bits {
         u8         status[0x8];
-        u8         reserved_0[0x18];
+        u8         reserved_at_8[0x18];
 
         u8         syndrome[0x20];
 
-        u8         reserved_1[0x40];
+        u8         reserved_at_40[0x40];
 
         struct mlx5_ifc_other_hca_cap_bits other_capability;
 };
 
 struct mlx5_ifc_query_other_hca_cap_in_bits {
         u8         opcode[0x10];
-        u8         reserved_0[0x10];
+        u8         reserved_at_10[0x10];
 
-        u8         reserved_1[0x10];
+        u8         reserved_at_20[0x10];
         u8         op_mod[0x10];
 
-        u8         reserved_2[0x10];
+        u8         reserved_at_40[0x10];
         u8         function_id[0x10];
 
-        u8         reserved_3[0x20];
+        u8         reserved_at_60[0x20];
 };
 
 struct mlx5_ifc_modify_other_hca_cap_out_bits {
         u8         status[0x8];
-        u8         reserved_0[0x18];
+        u8         reserved_at_8[0x18];
 
         u8         syndrome[0x20];
 
-        u8         reserved_1[0x40];
+        u8         reserved_at_40[0x40];
 };
 
 struct mlx5_ifc_modify_other_hca_cap_in_bits {
         u8         opcode[0x10];
-        u8         reserved_0[0x10];
+        u8         reserved_at_10[0x10];
 
-        u8         reserved_1[0x10];
+        u8         reserved_at_20[0x10];
         u8         op_mod[0x10];
 
-        u8         reserved_2[0x10];
+        u8         reserved_at_40[0x10];
         u8         function_id[0x10];
         u8         field_select[0x20];
 