net: ethtool: extend ringparam set/get APIs for tx_push
Currently, TX push is a standard driver feature which controls use of a fast
path descriptor push. This patch therefore extends the ringparam APIs and
data structures to support set/get of TX push via ethtool -G/-g.

Signed-off-by: Jie Wang <wangjie125@huawei.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: hongrongxuan <hongrongxuan@huawei.com>

Conflicts:
        net/ethtool/netlink.h
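Assuming a userspace ethtool new enough to understand the new attribute (the
tx-push keyword below comes from the matching userspace change and is an
assumption here, as is the placeholder device name eth0), the feature is
exercised like this:

        # query ring parameters; the reply now carries a TX push flag
        ethtool -g eth0

        # request the descriptor fast path on a driver that advertises support
        ethtool -G eth0 tx-push on

Drivers that do not set ETHTOOL_RING_USE_TX_PUSH reject the second command
with EOPNOTSUPP, as implemented in ethnl_set_rings() in the diff below.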
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -819,6 +819,7 @@ Kernel response contents:
   ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32     size of buffers on the ring
   ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``    u8      TCP header / data split
   ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
+  ``ETHTOOL_A_RINGS_TX_PUSH``           u8      flag of TX Push mode
   ====================================  ======  ===========================
 
 ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -828,6 +829,12 @@ separate buffers. The device configuration must make it possible to receive
 full memory pages of data, for example because MTU is high enough or through
 HW-GRO.
 
+``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable descriptor fast
+path to send packets. In ordinary path, driver fills descriptors in DRAM and
+notifies NIC hardware. In fast path, driver pushes descriptors to the device
+through MMIO writes, thus reducing the latency. However, enabling this feature
+may increase the CPU cost. Drivers may enforce additional per-packet
+eligibility checks (e.g. on packet size).
 
 RINGS_SET
 =========
@@ -844,6 +851,7 @@ Request contents:
   ``ETHTOOL_A_RINGS_TX``                u32     size of TX ring
   ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32     size of buffers on the ring
   ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
+  ``ETHTOOL_A_RINGS_TX_PUSH``           u8      flag of TX Push mode
   ====================================  ======  ===========================
 
 Kernel checks that requested ring sizes do not exceed limits reported by
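To make the documentation's ordinary/fast path distinction concrete, here is a
minimal, purely illustrative C sketch of a transmit submission routine
honoring the flag; every foo_* identifier, struct layout, and register below
is hypothetical and stands in for driver-specific plumbing (per-packet
eligibility checks are likewise elided):

#include <linux/io.h>
#include <linux/types.h>

struct foo_desc { u64 qw[4]; };         /* 32-byte TX descriptor */

struct foo_ring {
        struct foo_desc *desc_mem;      /* descriptor ring in DRAM */
        void __iomem *push_win;         /* device window for pushed descriptors */
        void __iomem *doorbell;         /* tail-pointer doorbell register */
        u32 tail;
        u8 tx_push;                     /* mirrors kernel_ethtool_ringparam.tx_push */
};

static void foo_submit_desc(struct foo_ring *ring, const struct foo_desc *desc)
{
        ring->desc_mem[ring->tail] = *desc;     /* always keep a DRAM copy */

        if (ring->tx_push) {
                /* Fast path: write the descriptor itself into the device
                 * through MMIO so hardware need not DMA it from DRAM;
                 * lower latency at a higher per-packet CPU cost.
                 */
                __iowrite64_copy(ring->push_win, desc,
                                 sizeof(*desc) / sizeof(u64));
        } else {
                /* Ordinary path: only ring the doorbell and let the NIC
                 * fetch the descriptor from host memory.
                 */
                writel(ring->tail, ring->doorbell);
        }
        ring->tail++;
}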
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -76,11 +76,13 @@ enum {
  * struct kernel_ethtool_ringparam - RX/TX ring configuration
  * @rx_buf_len: Current length of buffers on the rx ring.
  * @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @tx_push: The flag of tx push mode
  * @cqe_size: Size of TX/RX completion queue event
  */
 struct kernel_ethtool_ringparam {
        u32 rx_buf_len;
        u8 tcp_data_split;
+       u8 tx_push;
        u32 cqe_size;
 };
 
@@ -88,10 +90,12 @@ struct kernel_ethtool_ringparam {
  * enum ethtool_supported_ring_param - indicator caps for setting ring params
  * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
  * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
+ * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
  */
 enum ethtool_supported_ring_param {
        ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
        ETHTOOL_RING_USE_CQE_SIZE   = BIT(1),
+       ETHTOOL_RING_USE_TX_PUSH    = BIT(2),
 };
 
 #define __ETH_RSS_HASH_BIT(bit)        ((u32)1 << (bit))
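On the driver side, the capability bit and struct field compose as follows: a
driver advertises ETHTOOL_RING_USE_TX_PUSH in ethtool_ops->supported_ring_params,
reports the current state from .get_ringparam(), and applies the requested
state in .set_ringparam(). A minimal sketch, reusing the hypothetical foo
driver from above (foo_priv and its fields are assumptions, not real code):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
        u32 tx_ring_size;
        u8 tx_push_enabled;
};

static void foo_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ring,
                              struct kernel_ethtool_ringparam *kernel_ring,
                              struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(dev);

        ring->tx_pending = priv->tx_ring_size;
        kernel_ring->tx_push = priv->tx_push_enabled;
}

static int foo_set_ringparam(struct net_device *dev,
                             struct ethtool_ringparam *ring,
                             struct kernel_ethtool_ringparam *kernel_ring,
                             struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(dev);

        priv->tx_push_enabled = kernel_ring->tx_push;
        /* ...apply ring->tx_pending and reprogram the hardware... */
        return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
        .supported_ring_params  = ETHTOOL_RING_USE_TX_PUSH,
        .get_ringparam          = foo_get_ringparam,
        .set_ringparam          = foo_set_ringparam,
};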
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -317,6 +317,7 @@ enum {
        ETHTOOL_A_RINGS_RX_BUF_LEN,             /* u32 */
        ETHTOOL_A_RINGS_TCP_DATA_SPLIT,         /* u8 */
        ETHTOOL_A_RINGS_CQE_SIZE,               /* u32 */
+       ETHTOOL_A_RINGS_TX_PUSH,                /* u8 */
 
        /* add new constants above here */
        __ETHTOOL_A_RINGS_CNT,
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -64,7 +64,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
               nla_total_size(sizeof(u32)) +    /* _RINGS_TX */
               nla_total_size(sizeof(u32)) +    /* _RINGS_RX_BUF_LEN */
               nla_total_size(sizeof(u8)) +     /* _RINGS_TCP_DATA_SPLIT */
-              nla_total_size(sizeof(u32));     /* _RINGS_CQE_SIZE */
+              nla_total_size(sizeof(u32)) +    /* _RINGS_CQE_SIZE */
+              nla_total_size(sizeof(u8));      /* _RINGS_TX_PUSH */
 }
 
 static int rings_fill_reply(struct sk_buff *skb,
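For readers not fluent in netlink sizing: nla_total_size() is the attribute
payload plus header, rounded up to the netlink alignment, so summing one call
per attribute yields a safe upper bound on the reply size. Its definition in
include/net/netlink.h is essentially:

static inline int nla_attr_size(int payload)
{
        return NLA_HDRLEN + payload;
}

static inline int nla_total_size(int payload)
{
        return NLA_ALIGN(nla_attr_size(payload));
}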
@@ -103,7 +104,8 @@ static int rings_fill_reply(struct sk_buff *skb,
            (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
                        kr->tcp_data_split))) ||
            (kr->cqe_size &&
-            (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))))
+            (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
+           nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
                return -EMSGSIZE;
 
        return 0;
@@ -139,6 +141,7 @@ rings_set_policy[ETHTOOL_A_RINGS_MAX + 1] = {
        [ETHTOOL_A_RINGS_TX]                    = { .type = NLA_U32 },
        [ETHTOOL_A_RINGS_RX_BUF_LEN]            = NLA_POLICY_MIN(NLA_U32, 1),
        [ETHTOOL_A_RINGS_CQE_SIZE]              = NLA_POLICY_MIN(NLA_U32, 1),
+       [ETHTOOL_A_RINGS_TX_PUSH]               = NLA_POLICY_MAX(NLA_U8, 1),
 };
 
 int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
@@ -170,6 +173,15 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
        if (!ops->get_ringparam || !ops->set_ringparam)
                goto out_dev;
 
+       if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
+           !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
+               ret = -EOPNOTSUPP;
+               NL_SET_ERR_MSG_ATTR(info->extack,
+                                   tb[ETHTOOL_A_RINGS_TX_PUSH],
+                                   "setting tx push not supported");
+               goto out_dev;
+       }
+
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
        if (ret < 0)
@@ -186,6 +198,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
                                 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
        ethnl_update_u32(&kernel_ringparam.cqe_size,
                         tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
+       ethnl_update_u8(&kernel_ringparam.tx_push,
+                       tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
        ret = 0;
        if (!mod)
                goto out_ops;
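The ethnl_update_u8()/ethnl_update_u32() helpers live in net/ethtool/netlink.h,
the file the backport note above flags for conflicts. Each copies the
attribute's value into the target field only when the attribute is present and
the value actually changes, flipping *mod so unchanged requests can be
short-circuited. The u8 variant is, to the best of my reading of that header,
essentially:

static inline void ethnl_update_u8(u8 *dst, const struct nlattr *attr,
                                   bool *mod)
{
        u8 val;

        if (!attr)
                return;
        val = nla_get_u8(attr);
        if (*dst == val)
                return;

        *dst = val;
        *mod = true;
}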