net: thunderx: rework mac addresses list to u64 array
It is too expensive to pass u64 values via a linked list, so instead
allocate a single array for them, sized by the overall number of mac
addresses taken from the netdev. This removes the multiple kmalloc()
calls, avoids memory fragmentation and allows a single NULL check on
the kmalloc() return value, preventing a potential NULL pointer
dereference.
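For reference, a minimal sketch of the resulting allocation pattern
(kernel-style C; the struct layout mirrors the one in the diff below,
but the helper name is made up for illustration and is not added by
this patch):

#include <linux/slab.h>		/* kmalloc() */
#include <linux/types.h>	/* u64 */

struct xcast_addr_list {
	int count;
	u64 mc[];		/* flexible array member, one u64 per MAC */
};

/* One kmalloc() covers the header and all n_addrs entries. */
static struct xcast_addr_list *nicvf_alloc_mc_list_sketch(int n_addrs)
{
	struct xcast_addr_list *list;

	list = kmalloc(offsetof(typeof(*list), mc[n_addrs]), GFP_ATOMIC);
	if (unlikely(!list))	/* the single NULL check */
		return NULL;

	list->count = 0;
	return list;
}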
Addresses-Coverity-ID: 1467429 ("Dereference null return value")
Fixes: 37c3347eb2 ("net: thunderx: add ndo_set_rx_mode callback implementation for VF")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9b5c4dfb2a
parent b6a37e5e25
@@ -265,14 +265,9 @@ struct nicvf_drv_stats {
 
 struct cavium_ptp;
 
-struct xcast_addr {
-	struct list_head list;
-	u64 addr;
-};
-
 struct xcast_addr_list {
-	struct list_head list;
 	int count;
+	u64 mc[];
 };
 
 struct nicvf_work {
@@ -1929,7 +1929,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 						  work.work);
 	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
 	union nic_mbx mbx = {};
-	struct xcast_addr *xaddr, *next;
+	int idx;
 
 	if (!vf_work)
 		return;
@@ -1956,16 +1956,10 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 	/* check if we have any specific MACs to be added to PF DMAC filter */
 	if (vf_work->mc) {
 		/* now go through kernel list of MACs and add them one by one */
-		list_for_each_entry_safe(xaddr, next,
-					 &vf_work->mc->list, list) {
+		for (idx = 0; idx < vf_work->mc->count; idx++) {
 			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-			mbx.xcast.data.mac = xaddr->addr;
+			mbx.xcast.data.mac = vf_work->mc->mc[idx];
 			nicvf_send_msg_to_pf(nic, &mbx);
-
-			/* after receiving ACK from PF release memory */
-			list_del(&xaddr->list);
-			kfree(xaddr);
-			vf_work->mc->count--;
 		}
 		kfree(vf_work->mc);
 	}
@@ -1996,17 +1990,15 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
 			mode |= BGX_XCAST_MCAST_FILTER;
 			/* here we need to copy mc addrs */
 			if (netdev_mc_count(netdev)) {
-				struct xcast_addr *xaddr;
-
-				mc_list = kmalloc(sizeof(*mc_list), GFP_ATOMIC);
-				INIT_LIST_HEAD(&mc_list->list);
-				netdev_hw_addr_list_for_each(ha, &netdev->mc) {
-					xaddr = kmalloc(sizeof(*xaddr),
-							GFP_ATOMIC);
-					xaddr->addr =
+				mc_list = kmalloc(offsetof(typeof(*mc_list),
+							   mc[netdev_mc_count(netdev)]),
+						  GFP_ATOMIC);
+				if (unlikely(!mc_list))
+					return;
+				mc_list->count = 0;
+				netdev_hw_addr_list_for_each(ha, &netdev->mc) {
+					mc_list->mc[mc_list->count] =
 						ether_addr_to_u64(ha->addr);
-					list_add_tail(&xaddr->list,
-						      &mc_list->list);
 					mc_list->count++;
 				}
 			}