ethtool: add header/data split indication

For applications running on a mix of platforms it's useful
to have a clear indication whether the host's NIC supports the
geometry requirements of TCP zero-copy. TCP zero-copy Rx
requires data to be neatly placed into memory pages.
Most NICs can't do that.

This patch adds GET support only, since the NICs
I work with either always have the feature enabled or
enable it whenever the MTU is set to jumbo. In other words
I don't need SET, but adding it should be trivial.
(The only note on SET is that we will likely want
the setting to be "sticky" and use 0 / `unknown`
to reset it back to the driver default.)

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: hongrongxuan <hongrongxuan@huawei.com>
Jakub Kicinski, 2022-01-27 10:42:59 -08:00; committed by Jianping Liu
parent 188865e66d, commit 274112dce6
4 changed files with 27 additions and 5 deletions

Documentation/networking/ethtool-netlink.rst

@@ -817,8 +817,16 @@ Kernel response contents:
  ``ETHTOOL_A_RINGS_RX_JUMBO``            u32     size of RX jumbo ring
  ``ETHTOOL_A_RINGS_TX``                  u32     size of TX ring
  ``ETHTOOL_A_RINGS_RX_BUF_LEN``          u32     size of buffers on the ring
  ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``      u8      TCP header / data split
  ======================================  ======  ===========================

``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
page-flipping TCP zero-copy receive (``getsockopt(TCP_ZEROCOPY_RECEIVE)``).
If enabled the device is configured to place frame headers and data into
separate buffers. The device configuration must make it possible to receive
full memory pages of data, for example because MTU is high enough or through
HW-GRO.

RINGS_SET
=========
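The documentation paragraph above refers to the page-flipping receive path. As an illustration (not part of this patch), here is a minimal sketch of that application-side flow, loosely modeled on the kernel's tcp_mmap selftest; the function name, the RX_CHUNK size and the assumption that fd is a connected TCP socket are all illustrative, and error handling is omitted:

/* Hypothetical example, not part of this patch: map a window of the
 * socket's receive queue and ask TCP to flip full pages of payload
 * into it instead of copying them. */
#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>

#define RX_CHUNK (1UL << 20)    /* illustrative 1 MB receive window */

static void zerocopy_rx_once(int fd)
{
        struct tcp_zerocopy_receive zc;
        socklen_t zc_len = sizeof(zc);
        void *addr;

        /* Map a read-only window of the socket's receive queue. */
        addr = mmap(NULL, RX_CHUNK, PROT_READ, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED)
                return;

        memset(&zc, 0, sizeof(zc));
        zc.address = (__u64)(unsigned long)addr;
        zc.length = RX_CHUNK;

        if (!getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len)) {
                /* zc.length bytes of payload are now mapped at addr;
                 * zc.recv_skip_hint bytes, if any, were not page-aligned
                 * and must still be read with a normal recv(). */
        }

        munmap(addr, RX_CHUNK);
}

On a NIC reporting ETHTOOL_TCP_DATA_SPLIT_ENABLED the payload arrives in full, page-aligned buffers that the getsockopt() call can map; without the split most of the data ends up covered by recv_skip_hint and is copied the normal way.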

include/linux/ethtool.h

@@ -75,9 +75,11 @@ enum {
/**
 * struct kernel_ethtool_ringparam - RX/TX ring configuration
 * @rx_buf_len: Current length of buffers on the rx ring.
 * @tcp_data_split: Scatter packet headers and data to separate buffers
 */
struct kernel_ethtool_ringparam {
        u32 rx_buf_len;
        u8 tcp_data_split;
};

/**
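To illustrate how the new field is meant to be filled, here is a hedged sketch of a driver-side get_ringparam callback using the extended ringparam API this structure belongs to. The foo_* names, FOO_MAX_RX_DESC and the hds_enabled flag are made up for the example:

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>

/* Hypothetical driver state; names are illustrative only. */
struct foo_priv {
        u32 rx_ring_size;
        bool hds_enabled;       /* true when HW splits headers from data */
};

#define FOO_MAX_RX_DESC 4096

static void foo_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ring,
                              struct kernel_ethtool_ringparam *kernel_ring,
                              struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(dev);

        ring->rx_max_pending = FOO_MAX_RX_DESC;
        ring->rx_pending = priv->rx_ring_size;

        /* Report header/data split; leaving the field at 0 (unknown)
         * makes the kernel omit the attribute from the reply. */
        kernel_ring->tcp_data_split = priv->hds_enabled ?
                ETHTOOL_TCP_DATA_SPLIT_ENABLED :
                ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

rings_fill_reply() (see net/ethtool/rings.c below) emits ETHTOOL_A_RINGS_TCP_DATA_SPLIT only when this field is non-zero, so drivers that never set it keep reporting "unknown" by omission.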

include/uapi/linux/ethtool_netlink.h

@@ -297,6 +297,12 @@ enum {
/* RINGS */

enum {
        ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0,
        ETHTOOL_TCP_DATA_SPLIT_DISABLED,
        ETHTOOL_TCP_DATA_SPLIT_ENABLED,
};

enum {
        ETHTOOL_A_RINGS_UNSPEC,
        ETHTOOL_A_RINGS_HEADER,                         /* nest - _A_HEADER_* */
@@ -309,6 +315,7 @@ enum {
        ETHTOOL_A_RINGS_RX_JUMBO,                       /* u32 */
        ETHTOOL_A_RINGS_TX,                             /* u32 */
        ETHTOOL_A_RINGS_RX_BUF_LEN,                     /* u32 */
        ETHTOOL_A_RINGS_TCP_DATA_SPLIT,                 /* u8 */

        /* add new constants above here */
        __ETHTOOL_A_RINGS_CNT,
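Since the kernel omits ETHTOOL_A_RINGS_TCP_DATA_SPLIT entirely when the value is 0 (unknown), userspace can treat a missing attribute and ETHTOOL_TCP_DATA_SPLIT_UNKNOWN the same way. A trivial, hypothetical helper for a tool parsing the RINGS_GET reply might look like:

#include <linux/ethtool_netlink.h>

/* Hypothetical userspace helper: map the u8 carried in
 * ETHTOOL_A_RINGS_TCP_DATA_SPLIT to a printable string. */
static const char *tcp_data_split_str(unsigned char val)
{
        switch (val) {
        case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
                return "off";
        case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
                return "on";
        case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
        default:
                return "unknown";
        }
}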

net/ethtool/rings.c

@@ -62,7 +62,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
               nla_total_size(sizeof(u32)) +    /* _RINGS_RX_MINI */
               nla_total_size(sizeof(u32)) +    /* _RINGS_RX_JUMBO */
               nla_total_size(sizeof(u32)) +    /* _RINGS_TX */
               nla_total_size(sizeof(u32));     /* _RINGS_RX_BUF_LEN */
               nla_total_size(sizeof(u32)) +    /* _RINGS_RX_BUF_LEN */
               nla_total_size(sizeof(u8));      /* _RINGS_TCP_DATA_SPLIT */
}

static int rings_fill_reply(struct sk_buff *skb,
@@ -70,9 +71,11 @@ static int rings_fill_reply(struct sk_buff *skb,
                            const struct ethnl_reply_data *reply_base)
{
        const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
        const struct kernel_ethtool_ringparam *kernel_ringparam = &data->kernel_ringparam;
        const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
        const struct ethtool_ringparam *ringparam = &data->ringparam;

        WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

        if ((ringparam->rx_max_pending &&
             (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
                          ringparam->rx_max_pending) ||
@@ -93,9 +96,11 @@ static int rings_fill_reply(struct sk_buff *skb,
                          ringparam->tx_max_pending) ||
              nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
                          ringparam->tx_pending))) ||
            (kernel_ringparam->rx_buf_len &&
             (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN,
                          kernel_ringparam->rx_buf_len))))
            (kr->rx_buf_len &&
             (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
            (kr->tcp_data_split &&
             (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
                         kr->tcp_data_split))))
                return -EMSGSIZE;

        return 0;