Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/intel/igb/e1000_mac.c
	net/core/filter.c

Both conflicts were simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4366004d77
Author: David S. Miller <davem@davemloft.net>
Date:   2014-04-24 13:19:00 -04:00

29 changed files with 157 additions and 158 deletions


@@ -429,7 +429,7 @@ RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
 (therbert@google.com)
 
 Accelerated RFS was introduced in 2.6.35. Original patches were
-submitted by Ben Hutchings (bhutchings@solarflare.com)
+submitted by Ben Hutchings (bwh@kernel.org)
 
 Authors:
 Tom Herbert (therbert@google.com)


@@ -7674,7 +7674,6 @@ F:	drivers/clk/samsung/
 SAMSUNG SXGBE DRIVERS
 M:	Byungho An <bh74.an@samsung.com>
 M:	Girish K S <ks.giri@samsung.com>
-M:	Siva Reddy Kallam <siva.kallam@samsung.com>
 M:	Vipul Pandya <vipul.pandya@samsung.com>
 S:	Supported
 L:	netdev@vger.kernel.org


@@ -425,7 +425,7 @@ afterXPR:
 				if (cs->debug & L1_DEB_MONITOR)
 					debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]);
 			}
-	      AfterMOX1:
+	      AfterMOX1: ;
 #endif
 		}
 	}


@@ -574,6 +574,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+static void arc_emac_set_address_internal(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	unsigned int addr_low, addr_hi;
+
+	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
+	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
+
+	arc_reg_set(priv, R_ADDRL, addr_low);
+	arc_reg_set(priv, R_ADDRH, addr_hi);
+}
+
 /**
  * arc_emac_set_address - Set the MAC address for this device.
  * @ndev:	Pointer to net_device structure.
@@ -587,9 +599,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
  */
 static int arc_emac_set_address(struct net_device *ndev, void *p)
 {
-	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct sockaddr *addr = p;
-	unsigned int addr_low, addr_hi;
 
 	if (netif_running(ndev))
 		return -EBUSY;
@@ -599,11 +609,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
 
 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
-	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
-
-	arc_reg_set(priv, R_ADDRL, addr_low);
-	arc_reg_set(priv, R_ADDRH, addr_hi);
+	arc_emac_set_address_internal(ndev);
 
 	return 0;
 }
@@ -713,6 +719,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	else
 		eth_hw_addr_random(ndev);
 
+	arc_emac_set_address_internal(ndev);
 	dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
 
 	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */

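Context for the arc_emac hunks above: the MAC-address register programming is factored into one helper so that both the probe path and the .ndo_set_mac_address handler write R_ADDRL/R_ADDRH the same way. As a rough, standalone illustration of the byte packing that the le32_to_cpu()/le16_to_cpu() casts perform (made-up values and names, not driver code): bytes 0-3 of the address form the low register value and bytes 4-5 the high one, least-significant byte first.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        /* Same values le32_to_cpu()/le16_to_cpu() yield from the raw bytes. */
        uint32_t addr_low = mac[0] | mac[1] << 8 | mac[2] << 16 | (uint32_t)mac[3] << 24;
        uint32_t addr_hi  = mac[4] | mac[5] << 8;

        printf("low = 0x%08x, hi = 0x%04x\n", addr_low, addr_hi);
        return 0;
    }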

@@ -1167,7 +1167,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
 		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
 		adapter->tx_hwtstamp_skb = NULL;
 		adapter->tx_hwtstamp_timeouts++;
-		e_warn("clearing Tx timestamp hang");
+		e_warn("clearing Tx timestamp hang\n");
 	} else {
 		/* reschedule to check later */
 		schedule_work(&adapter->tx_hwtstamp_work);
@@ -5698,7 +5698,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
 
 	/* Jumbo frame support */
 	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -6247,6 +6247,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_thaw(struct device *dev)
 {
 	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -6267,7 +6268,6 @@ static int e1000e_pm_thaw(struct device *dev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);


@@ -4267,6 +4267,14 @@ static int i40e_open(struct net_device *netdev)
 	if (err)
 		return err;
 
+	/* configure global TSO hardware offload settings */
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+						       TCP_FLAG_FIN) >> 16);
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+						       TCP_FLAG_FIN |
+						       TCP_FLAG_CWR) >> 16);
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
+
 #ifdef CONFIG_I40E_VXLAN
 	vxlan_get_rx_port(netdev);
 #endif
@@ -6767,6 +6775,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 			   NETIF_F_HW_VLAN_CTAG_FILTER |
 			   NETIF_F_IPV6_CSUM |
 			   NETIF_F_TSO |
+			   NETIF_F_TSO_ECN |
 			   NETIF_F_TSO6 |
 			   NETIF_F_RXCSUM |
 			   NETIF_F_NTUPLE |


@@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 		udelay(5);
 	}
 	if (ret_code == I40E_ERR_TIMEOUT)
-		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
+		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
 
 	return ret_code;
 }


@@ -239,7 +239,7 @@ static void i40e_ptp_tx_work(struct work_struct *work)
 		dev_kfree_skb_any(pf->ptp_tx_skb);
 		pf->ptp_tx_skb = NULL;
 		pf->tx_hwtstamp_timeouts++;
-		dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang");
+		dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
 		return;
 	}
 
@@ -321,7 +321,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
 		pf->last_rx_ptp_check = jiffies;
 		pf->rx_hwtstamp_cleared++;
 		dev_warn(&vsi->back->pdev->dev,
-			 "%s: clearing Rx timestamp hang",
+			 "%s: clearing Rx timestamp hang\n",
 			 __func__);
 	}
 }


@@ -418,7 +418,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
 		}
 		break;
 	default:
-		dev_info(&pf->pdev->dev, "Could not specify spec type %d",
+		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
 			 input->flow_type);
 		ret = -EINVAL;
 	}
@@ -478,7 +478,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
 			}
 		} else {
-			dev_info(&pdev->dev, "FD filter programming error");
+			dev_info(&pdev->dev, "FD filter programming error\n");
 		}
 	} else if (error ==
 		   (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -1713,9 +1713,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 			     I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
 	if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
 		struct vlan_ethhdr *vhdr;
-		if (skb_header_cloned(skb) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-			return -ENOMEM;
+		int rc;
+
+		rc = skb_cow_head(skb, 0);
+		if (rc < 0)
+			return rc;
 		vhdr = (struct vlan_ethhdr *)skb->data;
 		vhdr->h_vlan_TCI = htons(tx_flags >>
 					 I40E_TX_FLAGS_VLAN_SHIFT);
@@ -1743,20 +1745,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
+	struct ipv6hdr *ipv6h;
 	struct tcphdr *tcph;
 	struct iphdr *iph;
 	u32 l4len;
 	int err;
-	struct ipv6hdr *ipv6h;
 
 	if (!skb_is_gso(skb))
 		return 0;
 
-	if (skb_header_cloned(skb)) {
-		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
 
 	if (protocol == htons(ETH_P_IP)) {
 		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);

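Both i40e hunks above drop the open-coded skb_header_cloned()/pskb_expand_head() pair in favour of skb_cow_head(). A hedged sketch of the idiom (hypothetical helper name, not part of this commit): the call is a no-op when the header is already private, reallocates just the header portion otherwise, and the caller only has to propagate the error before editing header bytes in place.

    #include <linux/skbuff.h>

    /* Illustrative only: make the header writable before the TX path
     * rewrites it (e.g. to insert a VLAN TCI or fix up TSO fields).
     */
    static int example_prepare_header(struct sk_buff *skb)
    {
        int err;

        err = skb_cow_head(skb, 0);     /* 0: no extra headroom needed */
        if (err < 0)
            return err;                 /* typically -ENOMEM */

        /* skb->data can now be modified safely. */
        return 0;
    }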

@@ -365,7 +365,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
 			if (word_address == address) {
 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
-				hw_dbg("Read INVM Word 0x%02x = %x",
+				hw_dbg("Read INVM Word 0x%02x = %x\n",
 				       address, *data);
 				status = E1000_SUCCESS;
 				break;


@@ -928,10 +928,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		 */
 		if (hw->fc.requested_mode == e1000_fc_full) {
 			hw->fc.current_mode = e1000_fc_full;
-			hw_dbg("Flow Control = FULL.\r\n");
+			hw_dbg("Flow Control = FULL.\n");
 		} else {
 			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
 		}
 	}
 	/* For receiving PAUSE frames ONLY.
@@ -946,7 +946,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 		 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 		hw->fc.current_mode = e1000_fc_tx_pause;
-		hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+		hw_dbg("Flow Control = TX PAUSE frames only.\n");
 	}
 	/* For transmitting PAUSE frames ONLY.
 	 *
@@ -960,7 +960,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 		 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 		hw->fc.current_mode = e1000_fc_rx_pause;
-		hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+		hw_dbg("Flow Control = RX PAUSE frames only.\n");
 	}
 	/* Per the IEEE spec, at this point flow control should be
 	 * disabled.  However, we want to consider that we could
@@ -986,10 +986,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
 			 (hw->fc.strict_ieee)) {
 			hw->fc.current_mode = e1000_fc_none;
-			hw_dbg("Flow Control = NONE.\r\n");
+			hw_dbg("Flow Control = NONE.\n");
 		} else {
 			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
 		}
 
 		/* Now we need to do one last check...  If we auto-


@@ -5193,8 +5193,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 
 	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		u32 rqdpc = rd32(E1000_RQDPC(i));
 		struct igb_ring *ring = adapter->rx_ring[i];
+		u32 rqdpc = rd32(E1000_RQDPC(i));
+		if (hw->mac.type >= e1000_i210)
+			wr32(E1000_RQDPC(i), 0);
 
 		if (rqdpc) {
 			ring->rx_stats.drops += rqdpc;


@@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work)
 		adapter->ptp_tx_skb = NULL;
 		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
 		adapter->tx_hwtstamp_timeouts++;
-		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
 		return;
 	}
 
@@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
 		rd32(E1000_RXSTMPH);
 		adapter->last_rx_ptp_check = jiffies;
 		adapter->rx_hwtstamp_cleared++;
-		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
 	}
 }


@@ -255,7 +255,6 @@ struct ixgbe_ring {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
-	unsigned long last_rx_timestamp;
 	unsigned long state;
 	u8 __iomem *tail;
 	dma_addr_t dma;			/* phys. address of descriptor ring */
@@ -778,6 +777,7 @@ struct ixgbe_adapter {
 	unsigned long ptp_tx_start;
 	unsigned long last_overflow_check;
 	unsigned long last_rx_ptp_check;
+	unsigned long last_rx_timestamp;
 	spinlock_t tmreg_lock;
 	struct cyclecounter cc;
 	struct timecounter tc;
@@ -960,24 +960,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-			     struct sk_buff *skb);
-
-static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
-					 union ixgbe_adv_rx_desc *rx_desc,
-					 struct sk_buff *skb)
-{
-	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
-		return;
-
-	__ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
-
-	/*
-	 * Update the last_rx_timestamp timer in order to enable watchdog check
-	 * for error case of latched timestamp on a dropped packet.
-	 */
-	rx_ring->last_rx_timestamp = jiffies;
-}
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
 int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);


@@ -1199,7 +1199,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
 	 */
 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
 
-	hw_dbg(hw, "Detected EEPROM page size = %d words.",
+	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
 	       hw->eeprom.word_page_size);
 out:
 	return status;


@@ -1690,7 +1690,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
-	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
 
 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {


@@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
 	if (time_out == max_time_out) {
 		status = IXGBE_ERR_LINK_SETUP;
-		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
 	}
 
 	return status;
@@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
 	if (time_out == max_time_out) {
 		status = IXGBE_ERR_LINK_SETUP;
-		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
 	}
 
 	return status;
@@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 				status = 0;
 			} else {
 				if (hw->allow_unsupported_sfp) {
-					e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.");
+					e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
 					status = 0;
 				} else {
 					hw_dbg(hw,


@@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbe_ring *rx_ring;
 	u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
 	unsigned long rx_event;
-	int n;
 
 	/* if we don't have a valid timestamp in the registers, just update the
 	 * timeout counter and exit
@@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 	/* determine the most recent watchdog or rx_timestamp event */
 	rx_event = adapter->last_rx_ptp_check;
-	for (n = 0; n < adapter->num_rx_queues; n++) {
-		rx_ring = adapter->rx_ring[n];
-		if (time_after(rx_ring->last_rx_timestamp, rx_event))
-			rx_event = rx_ring->last_rx_timestamp;
-	}
+	if (time_after(adapter->last_rx_timestamp, rx_event))
+		rx_event = adapter->last_rx_timestamp;
 
 	/* only need to read the high RXSTMP register to clear the lock */
 	if (time_is_before_jiffies(rx_event + 5*HZ)) {
 		IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
 		adapter->last_rx_ptp_check = jiffies;
-		e_warn(drv, "clearing RX Timestamp hang");
+		e_warn(drv, "clearing RX Timestamp hang\n");
 	}
 }
@@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
 		dev_kfree_skb_any(adapter->ptp_tx_skb);
 		adapter->ptp_tx_skb = NULL;
 		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
-		e_warn(drv, "clearing Tx Timestamp hang");
+		e_warn(drv, "clearing Tx Timestamp hang\n");
 		return;
 	}
 
@@ -530,35 +525,22 @@
 }
 
 /**
- * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
- * @q_vector: structure containing interrupt and ring information
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
  * @skb: particular skb to send timestamp with
  *
  * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-			     struct sk_buff *skb)
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 {
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct skb_shared_hwtstamps *shhwtstamps;
 	u64 regval = 0, ns;
 	u32 tsyncrxctl;
 	unsigned long flags;
 
-	/* we cannot process timestamps on a ring without a q_vector */
-	if (!q_vector || !q_vector->adapter)
-		return;
-
-	adapter = q_vector->adapter;
-	hw = &adapter->hw;
-
-	/*
-	 * Read the tsyncrxctl register afterwards in order to prevent taking an
-	 * I/O hit on every packet.
-	 */
 	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
 	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
 		return;
@@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
 	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
 	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
 
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
 	ns = timecounter_cyc2time(&adapter->tc, regval);
 	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
 	shhwtstamps = skb_hwtstamps(skb);
 	shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+	/* Update the last_rx_timestamp timer in order to enable watchdog check
+	 * for error case of latched timestamp on a dropped packet.
+	 */
+	adapter->last_rx_timestamp = jiffies;
 }
 
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)

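The ixgbe_ptp.c changes above move last_rx_timestamp from the Rx ring to the adapter, so the Rx-hang watchdog no longer has to walk every ring. The staleness test itself is the usual jiffies idiom; a minimal sketch with an illustrative name and the same 5-second window as the driver:

    #include <linux/types.h>
    #include <linux/jiffies.h>

    /* True when more than 5 seconds have passed since @last_event, meaning a
     * latched Rx timestamp was probably dropped and the timestamp register
     * should be read back once to release the hardware lock.
     */
    static bool example_rx_tstamp_stale(unsigned long last_event)
    {
        return time_is_before_jiffies(last_event + 5 * HZ);
    }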

@@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
 	p->tdes23.tx_rd_des23.first_desc = is_fd;
 	p->tdes23.tx_rd_des23.buf1_size = buf1_len;
 
-	p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
+	p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;
 
 	if (cksum)
-		p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
+		p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
 }
 
 /* Set VLAN control information */


@@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc {
 		u32 int_on_com:1;
 		/* TDES3 */
 		union {
-			u32 tcp_payload_len:18;
+			u16 tcp_payload_len;
 			struct {
 				u32 total_pkt_len:15;
 				u32 reserved1:1;
-				u32 cksum_ctl:2;
-			} cksum_pktlen;
+			} pkt_len;
 		} tx_pkt_len;
 
-		u32 tse_bit:1;
-		u32 tcp_hdr_len:4;
-		u32 sa_insert_ctl:3;
-		u32 crc_pad_ctl:2;
-		u32 last_desc:1;
-		u32 first_desc:1;
-		u32 ctxt_bit:1;
-		u32 own_bit:1;
+		u16 cksum_ctl:2;
+		u16 tse_bit:1;
+		u16 tcp_hdr_len:4;
+		u16 sa_insert_ctl:3;
+		u16 crc_pad_ctl:2;
+		u16 last_desc:1;
+		u16 first_desc:1;
+		u16 ctxt_bit:1;
+		u16 own_bit:1;
 	} tx_rd_des23;
 
 	/* tx write back Desc 2,3 */
@@ -70,25 +70,20 @@
 struct sxgbe_rx_norm_desc {
 	union {
-		u32 rdes0; /* buf1 address */
-		struct {
+		u64 rdes01; /* buf1 address */
+		union {
 			u32 out_vlan_tag:16;
 			u32 in_vlan_tag:16;
-		} wb_rx_des0;
-	} rd_wb_des0;
-
-	union {
-		u32 rdes1;	/* buf2 address or buf1[63:32] */
-		u32 rss_hash;	/* Write-back RX */
-	} rd_wb_des1;
+			u32 rss_hash;
+		} rx_wb_des01;
+	} rdes01;
 
 	union {
 		/* RX Read format Desc 2,3 */
 		struct{
 			/* RDES2 */
-			u32 buf2_addr;
+			u64 buf2_addr:62;
 			/* RDES3 */
-			u32 buf2_hi_addr:30;
 			u32 int_on_com:1;
 			u32 own_bit:1;
 		} rx_rd_des23;


@@ -27,7 +27,7 @@
 #define SXGBE_SMA_PREAD_CMD	0x02 /* post read increament address */
 #define SXGBE_SMA_READ_CMD	0x03 /* read command */
 #define SXGBE_SMA_SKIP_ADDRFRM	0x00040000 /* skip the address frame */
-#define SXGBE_MII_BUSY		0x00800000 /* mii busy */
+#define SXGBE_MII_BUSY		0x00400000 /* mii busy */
 
 static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
 {
@@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
 	struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
 	int err, phy_addr;
 	int *irqlist;
+	bool phy_found = false;
 	bool act;
 
 	/* allocate the new mdio bus */
@@ -162,7 +163,7 @@
 		irqlist = priv->mii_irq;
 
 	/* assign mii bus fields */
-	mdio_bus->name = "samsxgbe";
+	mdio_bus->name = "sxgbe";
 	mdio_bus->read = &sxgbe_mdio_read;
 	mdio_bus->write = &sxgbe_mdio_write;
 	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev)
 			netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
 				    phy->phy_id, phy_addr, irq_str,
 				    dev_name(&phy->dev), act ? " active" : "");
+			phy_found = true;
 		}
 	}
 
+	if (!phy_found) {
+		netdev_err(ndev, "PHY not found\n");
+		goto phyfound_err;
+	}
+
 	priv->mii = mdio_bus;
 
 	return 0;
 
+phyfound_err:
+	err = -ENODEV;
+	mdiobus_unregister(mdio_bus);
 mdiobus_err:
 	mdiobus_free(mdio_bus);
 	return err;


@@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x");
  */
 #define MII_DELAY		1
 
-#if SMC_DEBUG > 0
-#define DBG(n, dev, args...)				\
-	do {						\
-		if (SMC_DEBUG >= (n))			\
-			netdev_dbg(dev, args);		\
+#define DBG(n, dev, fmt, ...)					\
+	do {							\
+		if (SMC_DEBUG >= (n))				\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
 	} while (0)
 
-#define PRINTK(dev, args...)   netdev_info(dev, args)
-#else
-#define DBG(n, dev, args...)   do { } while (0)
-#define PRINTK(dev, args...)   netdev_dbg(dev, args)
-#endif
+#define PRINTK(dev, fmt, ...)					\
+	do {							\
+		if (SMC_DEBUG > 0)				\
+			netdev_info(dev, fmt, ##__VA_ARGS__);	\
+		else						\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
+	} while (0)
 
 #if SMC_DEBUG > 3
 static void PRINT_PKT(u_char *buf, int length)
@@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length)
 	pr_cont("\n");
 }
 #else
-#define PRINT_PKT(x...)  do { } while (0)
+static inline void PRINT_PKT(u_char *buf, int length) { }
 #endif
 
@@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
 	int timeout = 20;
 	unsigned long cookie;
 
-	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
+	DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);
 
 	cookie = probe_irq_on();

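The smc91x hunk above replaces the #if SMC_DEBUG guarded macro definitions with single definitions that test the debug level inside the macro body. A hedged sketch of why that helps (EXAMPLE_DEBUG and example_dbg are made-up names): the format string and its arguments are always compiled and type-checked while the untaken branch is discarded as dead code, so turning debugging off can no longer hide broken DBG() call sites such as the smc_findirq() one corrected in the last hunk.

    #include <linux/netdevice.h>

    #define EXAMPLE_DEBUG 0

    #define example_dbg(dev, fmt, ...)                            \
        do {                                                      \
            if (EXAMPLE_DEBUG > 0)                                \
                netdev_info(dev, fmt, ##__VA_ARGS__);             \
            else                                                  \
                netdev_dbg(dev, fmt, ##__VA_ARGS__);              \
        } while (0)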

@@ -246,13 +246,13 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
 	if (val1 != -1)
 		newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
 
-	if (val2 != -1)
+	if (val2 != -2)
 		newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
 
-	if (val3 != -1)
+	if (val3 != -3)
 		newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
 
-	if (val4 != -1)
+	if (val4 != -4)
 		newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
 
 	return kszphy_extended_write(phydev, reg, newval);


@@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev,
 	if (channels->rx_count || channels->tx_count || channels->other_count)
 		return -EINVAL;
 
-	if (queue_pairs > vi->max_queue_pairs)
+	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
 		return -EINVAL;
 
 	get_online_cpus();


@@ -389,8 +389,8 @@ static inline size_t vxlan_nlmsg_size(void)
 		+ nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
-			     struct vxlan_fdb *fdb, int type)
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+			     struct vxlan_rdst *rd, int type)
 {
 	struct net *net = dev_net(vxlan->dev);
 	struct sk_buff *skb;
@@ -400,8 +400,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
 	if (skb == NULL)
 		goto errout;
 
-	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
-			     first_remote_rtnl(fdb));
+	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -427,10 +426,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
 		.remote_vni = VXLAN_N_VID,
 	};
 
-	INIT_LIST_HEAD(&f.remotes);
-	list_add_rcu(&remote.list, &f.remotes);
-
-	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -438,11 +434,11 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
 	struct vxlan_fdb f = {
 		.state = NUD_STALE,
 	};
+	struct vxlan_rdst remote = { };
 
-	INIT_LIST_HEAD(&f.remotes);
 	memcpy(f.eth_addr, eth_addr, ETH_ALEN);
 
-	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 /* Hash Ethernet address */
@@ -533,7 +529,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
 
 /* Add/update destinations for multicast */
 static int vxlan_fdb_append(struct vxlan_fdb *f,
-			    union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
+			    union vxlan_addr *ip, __be16 port, __u32 vni,
+			    __u32 ifindex, struct vxlan_rdst **rdp)
 {
 	struct vxlan_rdst *rd;
 
@@ -551,6 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
 	list_add_tail_rcu(&rd->list, &f->remotes);
 
+	*rdp = rd;
 	return 1;
 }
 
@@ -690,6 +688,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 			    __be16 port, __u32 vni, __u32 ifindex,
 			    __u8 ndm_flags)
 {
+	struct vxlan_rdst *rd = NULL;
 	struct vxlan_fdb *f;
 	int notify = 0;
 
@@ -726,7 +725,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 		if ((flags & NLM_F_APPEND) &&
 		    (is_multicast_ether_addr(f->eth_addr) ||
 		     is_zero_ether_addr(f->eth_addr))) {
-			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
+			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
+						  &rd);
 
 			if (rc < 0)
 				return rc;
@@ -756,15 +756,18 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 		INIT_LIST_HEAD(&f->remotes);
 		memcpy(f->eth_addr, mac, ETH_ALEN);
 
-		vxlan_fdb_append(f, ip, port, vni, ifindex);
+		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
 
 		++vxlan->addrcnt;
 		hlist_add_head_rcu(&f->hlist,
 				   vxlan_fdb_head(vxlan, mac));
 	}
 
-	if (notify)
-		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+	if (notify) {
+		if (rd == NULL)
+			rd = first_remote_rtnl(f);
+		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
+	}
 
 	return 0;
 }
@@ -785,7 +788,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
 		    "delete %pM\n", f->eth_addr);
 
 	--vxlan->addrcnt;
-	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
+	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
 	hlist_del_rcu(&f->hlist);
 	call_rcu(&f->rcu, vxlan_fdb_free);
@@ -919,6 +922,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 	 */
 	if (rd && !list_is_singular(&f->remotes)) {
 		list_del_rcu(&rd->list);
+		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
 		kfree_rcu(rd, rcu);
 		goto out;
 	}
@@ -993,7 +997,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
 		rdst->remote_ip = *src_ip;
 		f->updated = jiffies;
-		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
 	} else {
 		/* learned new entry */
 		spin_lock(&vxlan->hash_lock);


@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
 void sock_diag_save_cookie(void *sk, __u32 *cookie);
 
 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(struct sock *sk,
 			     struct sk_buff *skb, int attrtype);
 
 #endif


@@ -122,6 +122,13 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return 0;
 }
 
+/* Register mappings for user programs. */
+#define A_REG		0
+#define X_REG		7
+#define TMP_REG		8
+#define ARG2_REG	2
+#define ARG3_REG	3
+
 /**
  *	__sk_run_filter - run a filter on a given context
  *	@ctx: buffer to run the filter on
@@ -242,6 +249,8 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 
 	regs[FP_REG]  = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
 	regs[ARG1_REG] = (u64) (unsigned long) ctx;
+	regs[A_REG] = 0;
+	regs[X_REG] = 0;
 
 select_insn:
 	goto *jumptable[insn->code];
@@ -649,13 +658,6 @@ static u64 __get_random_u32(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
 	return (u64)prandom_u32();
 }
 
-/* Register mappings for user programs. */
-#define A_REG		0
-#define X_REG		7
-#define TMP_REG		8
-#define ARG2_REG	2
-#define ARG3_REG	3
-
 static bool convert_bpf_extensions(struct sock_filter *fp,
 				   struct sock_filter_int **insnp)
 {


@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
 }
 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
 
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(struct sock *sk,
 			     struct sk_buff *skb, int attrtype)
 {
 	struct sock_fprog_kern *fprog;
@@ -58,7 +58,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
 	unsigned int flen;
 	int err = 0;
 
-	if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
 		nla_reserve(skb, attrtype, 0);
 		return 0;
 	}


@@ -172,7 +172,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
 		goto out_nlmsg_trim;
 
 	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
-	    sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER))
+	    sock_diag_put_filterinfo(sk, skb, PACKET_DIAG_FILTER))
 		goto out_nlmsg_trim;
 
 	return nlmsg_end(skb, nlh);