net: hns3: add ethtool_ops.set_coalesce support to PF
This patch adds ethtool_ops.set_coalesce support to PF.

Signed-off-by: Fuyun Liang <liangfuyun1@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7e96adc466
commit 434776a5fa
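For context (not part of the patch): the new .set_coalesce hook is reached through the standard ethtool coalescing interface. Below is a minimal user-space sketch of how a program could exercise it via the ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE ioctls once this change is in place; the interface name "eth0" and the microsecond values are illustrative assumptions only.

/* Minimal user-space sketch (illustrative, not from the patch). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed device name */
	ifr.ifr_data = (void *)&ec;

	/* read the current settings first, then modify only what we need */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		close(fd);
		return 1;
	}

	ec.cmd = ETHTOOL_SCOALESCE;
	ec.rx_coalesce_usecs = 50;		/* GL values, multiples of 2 us */
	ec.tx_coalesce_usecs = 50;
	ec.rx_coalesce_usecs_high = 80;		/* RL value, multiple of 4 us */
	ec.tx_coalesce_usecs_high = 80;		/* driver requires tx == rx here */

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCOALESCE");

	close(fd);
	return 0;
}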
@@ -170,14 +170,40 @@ static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
 	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
 }
 
-static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
-				       u32 rl_value)
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+				 u32 rl_value)
 {
+	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
+
 	/* this defines the configuration for RL (Interrupt Rate Limiter).
 	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
 	 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
 	 */
-	writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+
+	if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
+	    !tqp_vector->rx_group.gl_adapt_enable)
+		/* According to the hardware, the range of rl_reg is
+		 * 0-59 and the unit is 4.
+		 */
+		rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value)
+{
+	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value)
+{
+	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
 }
 
 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
@@ -194,7 +220,7 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
 	/* for now we are disabling Interrupt RL - we
 	 * will re-enable later
 	 */
-	hns3_set_vector_coalesc_rl(tqp_vector, 0);
+	hns3_set_vector_coalesce_rl(tqp_vector, 0);
 	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
 	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
 }
@@ -451,11 +451,15 @@ enum hns3_link_mode_bits {
 	HNS3_LM_COUNT = 15
 };
 
+#define HNS3_INT_GL_MAX			0x1FE0
 #define HNS3_INT_GL_50K			0x000A
 #define HNS3_INT_GL_20K			0x0019
 #define HNS3_INT_GL_18K			0x001B
 #define HNS3_INT_GL_8K			0x003E
 
+#define HNS3_INT_RL_MAX			0x00EC
+#define HNS3_INT_RL_ENABLE_MASK		0x40
+
 struct hns3_enet_ring_group {
 	/* array of pointers to rings */
 	struct hns3_enet_ring *ring;
@@ -595,6 +599,12 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 #define hns3_get_handle(ndev) \
 	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
 
+#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
+
 void hns3_ethtool_set_ops(struct net_device *netdev);
 int hns3_set_channels(struct net_device *netdev,
 		      struct ethtool_channels *ch);
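The two pairs of macros added in the hunk above encode the hardware granularity: GL registers count in 2 us units and RL registers in 4 us units, which is why the ethtool inputs are rounded down before conversion. A small stand-alone illustration follows (not from the patch; the round_down() stand-in and the sample values are assumptions for the demo):

#include <stdio.h>

/* user-space stand-in for the kernel's round_down() (power-of-two step) */
#define round_down(x, y)		((x) & ~((y) - 1))

/* same conversions as the macros added in the hunk above */
#define hns3_gl_usec_to_reg(int_gl)	(int_gl >> 1)
#define hns3_gl_round_down(int_gl)	round_down(int_gl, 2)
#define hns3_rl_usec_to_reg(int_rl)	(int_rl >> 2)
#define hns3_rl_round_down(int_rl)	round_down(int_rl, 4)

int main(void)
{
	unsigned int rx_usecs = 51;	/* odd on purpose: gets rounded to 50 */
	unsigned int usecs_high = 82;	/* not a multiple of 4: rounded to 80 */

	printf("rx-usecs %u -> %u us -> GL reg 0x%x\n",
	       rx_usecs, hns3_gl_round_down(rx_usecs),
	       hns3_gl_usec_to_reg(hns3_gl_round_down(rx_usecs)));
	printf("usecs-high %u -> %u us -> RL reg 0x%x\n",
	       usecs_high, hns3_rl_round_down(usecs_high),
	       hns3_rl_usec_to_reg(hns3_rl_round_down(usecs_high)));
	return 0;
}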
@@ -607,6 +617,13 @@ int hns3_clean_rx_ring(
 		struct hns3_enet_ring *ring, int budget,
 		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
 
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+				 u32 rl_value);
+
 #ifdef CONFIG_HNS3_DCB
 void hns3_dcbnl_setup(struct hnae3_handle *handle);
 #else
@@ -923,6 +923,146 @@ static int hns3_get_coalesce(struct net_device *netdev,
 	return hns3_get_coalesce_per_queue(netdev, 0, cmd);
 }
 
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+				       struct ethtool_coalesce *cmd)
+{
+	u32 rx_gl, tx_gl;
+
+	if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+		netdev_err(netdev,
+			   "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
+			   HNS3_INT_GL_MAX);
+		return -EINVAL;
+	}
+
+	if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+		netdev_err(netdev,
+			   "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
+			   HNS3_INT_GL_MAX);
+		return -EINVAL;
+	}
+
+	rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+	if (rx_gl != cmd->rx_coalesce_usecs) {
+		netdev_info(netdev,
+			    "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+			    cmd->rx_coalesce_usecs, rx_gl);
+	}
+
+	tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+	if (tx_gl != cmd->tx_coalesce_usecs) {
+		netdev_info(netdev,
+			    "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+			    cmd->tx_coalesce_usecs, tx_gl);
+	}
+
+	return 0;
+}
+
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+				       struct ethtool_coalesce *cmd)
+{
+	u32 rl;
+
+	if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+		netdev_err(netdev,
+			   "tx_usecs_high must be same as rx_usecs_high.\n");
+		return -EINVAL;
+	}
+
+	if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+		netdev_err(netdev,
+			   "Invalid usecs_high value, usecs_high range is 0-%d\n",
+			   HNS3_INT_RL_MAX);
+		return -EINVAL;
+	}
+
+	rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+	if (rl != cmd->rx_coalesce_usecs_high) {
+		netdev_info(netdev,
+			    "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+			    cmd->rx_coalesce_usecs_high, rl);
+	}
+
+	return 0;
+}
+
+static int hns3_check_coalesce_para(struct net_device *netdev,
+				    struct ethtool_coalesce *cmd)
+{
+	int ret;
+
+	ret = hns3_check_gl_coalesce_para(netdev, cmd);
+	if (ret) {
+		netdev_err(netdev,
+			   "Check gl coalesce param fail. ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = hns3_check_rl_coalesce_para(netdev, cmd);
+	if (ret) {
+		netdev_err(netdev,
+			   "Check rl coalesce param fail. ret = %d\n", ret);
+		return ret;
+	}
+
+	if (cmd->use_adaptive_tx_coalesce == 1 ||
+	    cmd->use_adaptive_rx_coalesce == 1) {
+		netdev_info(netdev,
+			    "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+			    cmd->use_adaptive_tx_coalesce,
+			    cmd->use_adaptive_rx_coalesce);
+	}
+
+	return 0;
+}
+
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+					struct ethtool_coalesce *cmd,
+					u32 queue)
+{
+	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	int queue_num = h->kinfo.num_tqps;
+
+	tx_vector = priv->ring_data[queue].ring->tqp_vector;
+	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+	tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
+	rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+	tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
+	rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+
+	hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
+	hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+
+	hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+	hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	u16 queue_num = h->kinfo.num_tqps;
+	int ret;
+	int i;
+
+	ret = hns3_check_coalesce_para(netdev, cmd);
+	if (ret)
+		return ret;
+
+	h->kinfo.int_rl_setting =
+		hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+	for (i = 0; i < queue_num; i++)
+		hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+	return 0;
+}
+
 static const struct ethtool_ops hns3vf_ethtool_ops = {
 	.get_drvinfo = hns3_get_drvinfo,
 	.get_ringparam = hns3_get_ringparam,
@@ -962,6 +1102,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
 	.get_channels = hns3_get_channels,
 	.set_channels = hns3_set_channels,
 	.get_coalesce = hns3_get_coalesce,
+	.set_coalesce = hns3_set_coalesce,
 };
 
 void hns3_ethtool_set_ops(struct net_device *netdev)
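Note (editorial, not part of the patch): only the PF table gains the new hook here; .set_coalesce is added to hns3_ethtool_ops, while hns3vf_ethtool_ops appears in the hunks above purely as unmodified context. Once registered, the standard ethtool coalescing path (e.g. ethtool -C <dev> rx-usecs N tx-usecs N rx-usecs-high M) reaches hns3_set_coalesce(), which validates and rounds the GL/RL parameters, stores the rounded usecs-high value in kinfo.int_rl_setting, and reprograms the TX and RX vectors of every queue.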