ethtool: add support to set/get completion queue event size

Add support for setting the completion queue event (CQE) size via
ethtool -G's cqe-size parameter and reading it back via ethtool -g.

~ # ./ethtool -G eth0 cqe-size 512
~ # ./ethtool -g eth0
Ring parameters for eth0:
Pre-set maximums:
RX:             1048576
RX Mini:        n/a
RX Jumbo:       n/a
TX:             1048576
Current hardware settings:
RX:             256
RX Mini:        n/a
RX Jumbo:       n/a
TX:             4096
RX Buf Len:             2048
CQE Size:                128

Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: hongrongxuan <hongrongxuan@huawei.com>

 Conflicts:
	net/ethtool/netlink.h

--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst

@@ -818,6 +818,7 @@ Kernel response contents:
   ``ETHTOOL_A_RINGS_TX``                u32     size of TX ring
   ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32     size of buffers on the ring
   ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``    u8      TCP header / data split
+  ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
   ====================================  ======  ===========================
 
 ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -842,6 +843,7 @@ Request contents:
   ``ETHTOOL_A_RINGS_RX_JUMBO``          u32     size of RX jumbo ring
   ``ETHTOOL_A_RINGS_TX``                u32     size of TX ring
   ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32     size of buffers on the ring
+  ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
   ====================================  ======  ===========================
 
 Kernel checks that requested ring sizes do not exceed limits reported by
@@ -849,6 +851,15 @@ driver. Driver may impose additional constraints and may not support all
 attributes.
 
+``ETHTOOL_A_RINGS_CQE_SIZE`` specifies the completion queue event size.
+Completion queue events (CQE) are the events posted by the NIC to indicate the
+completion status of a packet: either that it was sent (e.g. send success or
+error) or received (e.g. pointers to packet fragments). The CQE size parameter
+allows changing the CQE size from its default, if the NIC supports it. A
+bigger CQE can hold more receive buffer pointers, so the NIC can transfer a
+bigger frame from the wire. Depending on the NIC hardware, the driver may
+adjust the overall completion queue size when the CQE size is modified.
+
 CHANNELS_GET
 ============
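
At the netlink level the new attribute is just another u32 in the ETHTOOL_MSG_RINGS_SET
request described above. Below is a rough userspace sketch using libmnl; the
set_cqe_size() helper name is hypothetical, the "ethtool" genetlink family id is
assumed to have been resolved beforehand via CTRL_CMD_GETFAMILY, and error handling
is omitted.

/* Illustrative only: build and send ETHTOOL_MSG_RINGS_SET carrying
 * ETHTOOL_A_RINGS_CQE_SIZE.  ethnl_fam_id is assumed to have been
 * resolved beforehand (genetlink CTRL_CMD_GETFAMILY); no error handling.
 */
#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/ethtool_netlink.h>

static void set_cqe_size(uint16_t ethnl_fam_id, const char *ifname,
			 uint32_t cqe_size)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct genlmsghdr *ghdr;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = ethnl_fam_id;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	ghdr = mnl_nlmsg_put_extra_header(nlh, sizeof(*ghdr));
	ghdr->cmd = ETHTOOL_MSG_RINGS_SET;
	ghdr->version = ETHTOOL_GENL_VERSION;

	/* request header nest selects the target device */
	nest = mnl_attr_nest_start(nlh, ETHTOOL_A_RINGS_HEADER);
	mnl_attr_put_strz(nlh, ETHTOOL_A_HEADER_DEV_NAME, ifname);
	mnl_attr_nest_end(nlh, nest);

	/* the attribute added by this patch */
	mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_CQE_SIZE, cqe_size);

	nl = mnl_socket_open(NETLINK_GENERIC);
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	/* a real caller would read back and check the netlink ACK here */
	mnl_socket_close(nl);
}

Calling set_cqe_size(fam_id, "eth0", 512) would be the netlink-level equivalent of
the "ethtool -G eth0 cqe-size 512" command shown in the commit message.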

--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h

@@ -76,18 +76,22 @@ enum {
  * struct kernel_ethtool_ringparam - RX/TX ring configuration
  * @rx_buf_len: Current length of buffers on the rx ring.
  * @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @cqe_size: Size of TX/RX completion queue event
  */
 struct kernel_ethtool_ringparam {
 	u32	rx_buf_len;
 	u8	tcp_data_split;
+	u32	cqe_size;
 };
 
 /**
  * enum ethtool_supported_ring_param - indicator caps for setting ring params
  * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
+ * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
  */
 enum ethtool_supported_ring_param {
 	ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
+	ETHTOOL_RING_USE_CQE_SIZE   = BIT(1),
 };
 
 #define __ETH_RSS_HASH_BIT(bit)	((u32)1 << (bit))
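
For a driver, the flow implied by these definitions is: advertise the capability in
ethtool_ops->supported_ring_params, report the active size from get_ringparam() and
apply a requested size in set_ringparam(). A minimal sketch follows; the foo_* names,
the foo_priv layout and the 128/512 constraint are made-up illustrations, not part of
this patch.

/* Hypothetical driver glue for the new cqe_size field; "foo" names are
 * made up for illustration.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	u32 rx_ring_size;
	u32 tx_ring_size;
	u32 cqe_size;		/* currently programmed CQE size */
};

static void foo_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	ring->rx_pending = priv->rx_ring_size;
	ring->tx_pending = priv->tx_ring_size;
	/* reported back as ETHTOOL_A_RINGS_CQE_SIZE */
	kernel_ring->cqe_size = priv->cqe_size;
}

static int foo_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* assumed hardware constraint, purely illustrative */
	if (kernel_ring->cqe_size != 128 && kernel_ring->cqe_size != 512) {
		NL_SET_ERR_MSG(extack, "CQE size must be 128 or 512");
		return -EINVAL;
	}

	priv->cqe_size = kernel_ring->cqe_size;
	/* ...tear down and re-create completion queues with the new size... */
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_ring_params	= ETHTOOL_RING_USE_CQE_SIZE,
	.get_ringparam		= foo_get_ringparam,
	.set_ringparam		= foo_set_ringparam,
};

Because ethnl_set_rings() rejects ETHTOOL_A_RINGS_CQE_SIZE with -EOPNOTSUPP when
ETHTOOL_RING_USE_CQE_SIZE is not set (see the net/ethtool/rings.c hunks below), a
driver that does not opt in never sees the new field.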

--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h

@@ -316,6 +316,7 @@ enum {
 	ETHTOOL_A_RINGS_TX,				/* u32 */
 	ETHTOOL_A_RINGS_RX_BUF_LEN,			/* u32 */
 	ETHTOOL_A_RINGS_TCP_DATA_SPLIT,			/* u8 */
+	ETHTOOL_A_RINGS_CQE_SIZE,			/* u32 */
 
 	/* add new constants above here */
 	__ETHTOOL_A_RINGS_CNT,

--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c

@@ -63,7 +63,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
 	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
 	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
 	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
-	       nla_total_size(sizeof(u8));	/* _RINGS_TCP_DATA_SPLIT */
+	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
+	       nla_total_size(sizeof(u32));	/* _RINGS_CQE_SIZE */
 }
 
 static int rings_fill_reply(struct sk_buff *skb,
@@ -100,7 +101,9 @@ static int rings_fill_reply(struct sk_buff *skb,
 	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
 	    (kr->tcp_data_split &&
 	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
-			 kr->tcp_data_split))))
+			 kr->tcp_data_split))) ||
+	    (kr->cqe_size &&
+	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))))
 		return -EMSGSIZE;
 
 	return 0;
@@ -135,6 +138,7 @@ rings_set_policy[ETHTOOL_A_RINGS_MAX + 1] = {
 	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
 	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
 	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
+	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
 };
 
 int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
@@ -180,6 +184,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
 	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
 	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
 			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
+	ethnl_update_u32(&kernel_ringparam.cqe_size,
+			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
 	ret = 0;
 	if (!mod)
 		goto out_ops;
@@ -211,6 +217,15 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
 		goto out_ops;
 	}
 
+	if (kernel_ringparam.cqe_size &&
+	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
+		ret = -EOPNOTSUPP;
+		NL_SET_ERR_MSG_ATTR(info->extack,
+				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
+				    "setting cqe size not supported");
+		goto out_ops;
+	}
+
 	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
 					      &kernel_ringparam, info->extack);
 	if (ret < 0)