Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (73 commits)
  netfilter: Remove ADVANCED dependency from NF_CONNTRACK_NETBIOS_NS
  ipv4: flush route cache after change accept_local
  sch_red: fix red_change
  Revert "udp: remove redundant variable"
  bridge: master device stuck in no-carrier state forever when in user-stp mode
  ipv4: Perform peer validation on cached route lookup.
  net/core: fix rollback handler in register_netdevice_notifier
  sch_red: fix red_calc_qavg_from_idle_time
  bonding: only use primary address for ARP
  ipv4: fix lockdep splat in rt_cache_seq_show
  sch_teql: fix lockdep splat
  net: fec: Select the FEC driver by default for i.MX SoCs
  isdn: avoid copying too long drvid
  isdn: make sure strings are null terminated
  netlabel: Fix build problems when IPv6 is not enabled
  sctp: better integer overflow check in sctp_auth_create_key()
  sctp: integer overflow in sctp_auth_create_key()
  ipv6: Set mcast_hops to IPV6_DEFAULT_MCASTHOPS when -1 was given.
  net: Fix corruption in /proc/*/net/dev_mcast
  mac80211: fix race between the AGG SM and the Tx data path
  ...
commit 5983fe2b29

@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
     case IIOCDOCFINT:
         if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
             return (-EINVAL); /* invalid driver */
+        if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+            sizeof(dioctl.cf_ctrl.msn))
+            return -EINVAL;
+        if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+            sizeof(dioctl.cf_ctrl.fwd_nr))
+            return -EINVAL;
         if ((i = cf_command(dioctl.cf_ctrl.drvid,
                             (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
                             dioctl.cf_ctrl.cfproc,

@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
         char *c,
              *e;
 
+        if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+            sizeof(cfg->drvid))
+            return -EINVAL;
         drvidx = -1;
         chidx = -1;
         strcpy(drvid, cfg->drvid);

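Both isdn hunks above add the same guard: strnlen() capped at the buffer size detects a string with no NUL terminator inside its buffer before it reaches strcpy() or a driver-id lookup. A minimal userspace sketch of the idiom (the helper name is illustrative, not kernel code):

    #include <errno.h>
    #include <string.h>

    /* strnlen() never scans past cap, so an unterminated buffer
     * returns exactly cap -- the condition both patches test for. */
    static int check_nul_terminated(const char *buf, size_t cap)
    {
        if (strnlen(buf, cap) == cap)
            return -EINVAL; /* no NUL inside the buffer */
        return 0;
    }

Only after this check does a strcpy() out of the fixed-size field become safe.
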
@@ -4,7 +4,7 @@
 
 menuconfig ARCNET
     depends on NETDEVICES && (ISA || PCI || PCMCIA)
-    bool "ARCnet support"
+    tristate "ARCnet support"
     ---help---
       If you have a network card of this type, say Y and check out the
       (arguably) beautiful poetry in

@@ -2553,30 +2553,6 @@ re_arm:
     }
 }
 
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
-    struct in_device *idev;
-    struct in_ifaddr *ifa;
-    __be32 addr = 0;
-
-    if (!dev)
-        return 0;
-
-    rcu_read_lock();
-    idev = __in_dev_get_rcu(dev);
-    if (!idev)
-        goto out;
-
-    ifa = idev->ifa_list;
-    if (!ifa)
-        goto out;
-
-    addr = ifa->ifa_local;
-out:
-    rcu_read_unlock();
-    return addr;
-}
-
 static int bond_has_this_ip(struct bonding *bond, __be32 ip)
 {
     struct vlan_entry *vlan;
@@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
     struct bonding *bond;
     struct vlan_entry *vlan;
 
+    /* we only care about primary address */
+    if(ifa->ifa_flags & IFA_F_SECONDARY)
+        return NOTIFY_DONE;
+
     list_for_each_entry(bond, &bn->dev_list, bond_list) {
         if (bond->dev == event_dev) {
             switch (event) {
@@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                 bond->master_ip = ifa->ifa_local;
                 return NOTIFY_OK;
             case NETDEV_DOWN:
-                bond->master_ip = bond_glean_dev_ip(bond->dev);
+                bond->master_ip = 0;
                 return NOTIFY_OK;
             default:
                 return NOTIFY_DONE;
@@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                     vlan->vlan_ip = ifa->ifa_local;
                     return NOTIFY_OK;
                 case NETDEV_DOWN:
-                    vlan->vlan_ip =
-                        bond_glean_dev_ip(vlan_dev);
+                    vlan->vlan_ip = 0;
                     return NOTIFY_OK;
                 default:
                     return NOTIFY_DONE;

@@ -20,7 +20,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>

@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
                  skb->len,
                  DMA_TO_DEVICE);
         rp->skb = NULL;
-        dev_kfree_skb(skb);
+        dev_kfree_skb_irq(skb);
     }
 
     bp->tx_cons = cons;

@@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
     return 0;
 }
 
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+                                     struct link_params *params, u8 mode)
+{
+    struct bnx2x *bp = params->bp;
+    u16 temp;
+
+    bnx2x_cl22_write(bp, phy,
+        MDIO_REG_GPHY_SHADOW,
+        MDIO_REG_GPHY_SHADOW_LED_SEL1);
+    bnx2x_cl22_read(bp, phy,
+        MDIO_REG_GPHY_SHADOW,
+        &temp);
+    temp &= 0xff00;
+
+    DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+    switch (mode) {
+    case LED_MODE_FRONT_PANEL_OFF:
+    case LED_MODE_OFF:
+        temp |= 0x00ee;
+        break;
+    case LED_MODE_OPER:
+        temp |= 0x0001;
+        break;
+    case LED_MODE_ON:
+        temp |= 0x00ff;
+        break;
+    default:
+        break;
+    }
+    bnx2x_cl22_write(bp, phy,
+        MDIO_REG_GPHY_SHADOW,
+        MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+    return;
+}
+
+
 static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
                                      struct link_params *params)
 {
@@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = {
     .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
     .format_fw_ver = (format_fw_ver_t)NULL,
     .hw_reset = (hw_reset_t)NULL,
-    .set_link_led = (set_link_led_t)NULL,
+    .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
     .phy_specific_func = (phy_specific_func_t)NULL
 };
 /*****************************************************************/

@@ -6990,6 +6990,7 @@ The other bits are reserved and should be zero*/
 #define MDIO_REG_INTR_MASK                  0x1b
 #define MDIO_REG_INTR_MASK_LINK_STATUS      (0x1 << 1)
 #define MDIO_REG_GPHY_SHADOW                0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1       (0x0d << 10)
 #define MDIO_REG_GPHY_SHADOW_LED_SEL2       (0x0e << 10)
 #define MDIO_REG_GPHY_SHADOW_WR_ENA         (0x1 << 15)
 #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED   (0x1e << 10)

@@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
 
         if (!dm->wake_state)
             irq_set_irq_wake(dm->irq_wake, 1);
-        else if (dm->wake_state & !opts)
+        else if (dm->wake_state && !opts)
             irq_set_irq_wake(dm->irq_wake, 0);
     }
 

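The dm9000 fix above swaps bitwise & for logical &&. With &, the right operand !opts collapses to 0 or 1, so the disarm branch only ran when bit 0 of wake_state happened to be set. A compilable demonstration (the values are illustrative):

    #include <assert.h>

    int main(void)
    {
        unsigned int wake_state = 0x2; /* a wake source other than bit 0 */
        unsigned int opts = 0;         /* caller turns all sources off */

        /* buggy: 0x2 & !0  ->  0x2 & 1  ->  0, branch skipped */
        assert((wake_state & !opts) == 0);
        /* fixed: nonzero && !0  ->  1, wake correctly disarmed */
        assert((wake_state && !opts) == 1);
        return 0;
    }
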
@@ -24,6 +24,7 @@ config FEC
     bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
     depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
            ARCH_MXC || ARCH_MXS)
+    default ARCH_MXC || ARCH_MXS if ARM
     select PHYLIB
     ---help---
       Say Y here if you want to use the built-in 10/100 Fast ethernet

@@ -61,9 +61,9 @@
 #ifdef EHEA_SMALL_QUEUES
 #define EHEA_MAX_CQE_COUNT      1023
 #define EHEA_DEF_ENTRIES_SQ     1023
-#define EHEA_DEF_ENTRIES_RQ1    4095
+#define EHEA_DEF_ENTRIES_RQ1    1023
 #define EHEA_DEF_ENTRIES_RQ2    1023
-#define EHEA_DEF_ENTRIES_RQ3    1023
+#define EHEA_DEF_ENTRIES_RQ3     511
 #else
 #define EHEA_MAX_CQE_COUNT      4080
 #define EHEA_DEF_ENTRIES_SQ     4080

@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
 out_herr:
     free_page((unsigned long)cb2);
 resched:
-    schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+    schedule_delayed_work(&port->stats_work,
+                          round_jiffies_relative(msecs_to_jiffies(1000)));
 }
 
 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev)
     }
 
     mutex_unlock(&port->port_lock);
-    schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+    schedule_delayed_work(&port->stats_work,
+                          round_jiffies_relative(msecs_to_jiffies(1000)));
 
     return ret;
 }

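Both ehea hunks wrap the 1-second stats timer in round_jiffies_relative(), which nudges the expiry onto a whole-second boundary so periodic timers across the system fire together and the CPU wakes less often. A simplified userspace model of the rounding (the real kernel helper also accounts for per-CPU skew; the HZ value is an assumption):

    #define HZ 250 /* illustrative tick rate */

    /* Round a relative timeout so it expires on the next whole-second
     * tick, batching it with other ~1 s timers. */
    static unsigned long round_relative_model(unsigned long delta,
                                              unsigned long now)
    {
        unsigned long expires = now + delta;

        expires += HZ - (expires % HZ); /* next whole second */
        return expires - now;
    }
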
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
 
     /* FIXME: do we need this? */
     memset(local_list, 0, sizeof(local_list));
-    memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+    memset(remote_list, 0, sizeof(remote_list));
 
     /* a 0 address marks the end of the valid entries */
     if (senddata->addr[startchunk] == 0)

@@ -1744,6 +1744,112 @@ jme_phy_off(struct jme_adapter *jme)
     jme_new_phy_off(jme);
 }
 
+static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+    u32 phy_addr;
+
+    phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+    jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+            phy_addr);
+    return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+            JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+    u32 phy_addr;
+
+    phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+    jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+            phy_data);
+    jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+            phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+    u32 ctrl1000, phy_data;
+
+    jme_phy_off(jme);
+    jme_phy_on(jme);
+    /* Enabel PHY test mode 1 */
+    ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+    ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+    ctrl1000 |= PHY_GAD_TEST_MODE_1;
+    jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+    phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+    phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+    phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+            JM_PHY_EXT_COMM_2_CALI_ENABLE;
+    jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+    msleep(20);
+    phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+    phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+            JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+            JM_PHY_EXT_COMM_2_CALI_LATCH);
+    jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+    /* Disable PHY test mode */
+    ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+    ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+    jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+    return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+    u32 phy_comm0 = 0, phy_comm1 = 0;
+    u8 nic_ctrl;
+
+    pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+    if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+        return 0;
+
+    switch (jme->pdev->device) {
+    case PCI_DEVICE_ID_JMICRON_JMC250:
+        if (((jme->chip_main_rev == 5) &&
+            ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+            (jme->chip_sub_rev == 3))) ||
+            (jme->chip_main_rev >= 6)) {
+            phy_comm0 = 0x008A;
+            phy_comm1 = 0x4109;
+        }
+        if ((jme->chip_main_rev == 3) &&
+            ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+            phy_comm0 = 0xE088;
+        break;
+    case PCI_DEVICE_ID_JMICRON_JMC260:
+        if (((jme->chip_main_rev == 5) &&
+            ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+            (jme->chip_sub_rev == 3))) ||
+            (jme->chip_main_rev >= 6)) {
+            phy_comm0 = 0x008A;
+            phy_comm1 = 0x4109;
+        }
+        if ((jme->chip_main_rev == 3) &&
+            ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+            phy_comm0 = 0xE088;
+        if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+            phy_comm0 = 0x608A;
+        if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+            phy_comm0 = 0x408A;
+        break;
+    default:
+        return -ENODEV;
+    }
+    if (phy_comm0)
+        jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+    if (phy_comm1)
+        jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+    return 0;
+}
+
 static int
 jme_open(struct net_device *netdev)
 {
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
         jme_set_settings(netdev, &jme->old_ecmd);
     else
         jme_reset_phy_processor(jme);
-
+    jme_phy_calibration(jme);
+    jme_phy_setEA(jme);
     jme_reset_link(jme);
 
     return 0;
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
         jme_set_settings(netdev, &jme->old_ecmd);
     else
         jme_reset_phy_processor(jme);
-
+    jme_phy_calibration(jme);
+    jme_phy_setEA(jme);
     jme_start_irq(jme);
     netif_device_attach(netdev);
 
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-

@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
                  RXMCS_CHECKSUM,
 };
 
+/* Extern PHY common register 2 */
+
+#define PHY_GAD_TEST_MODE_1            0x00002000
+#define PHY_GAD_TEST_MODE_MSK          0x0000E000
+#define JM_PHY_SPEC_REG_READ           0x00004000
+#define JM_PHY_SPEC_REG_WRITE          0x00008000
+#define PHY_CALIBRATION_DELAY          20
+#define JM_PHY_SPEC_ADDR_REG           0x1E
+#define JM_PHY_SPEC_DATA_REG           0x1F
+
+#define JM_PHY_EXT_COMM_0_REG          0x30
+#define JM_PHY_EXT_COMM_1_REG          0x31
+#define JM_PHY_EXT_COMM_2_REG          0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE  0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0  0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH   0x10
+#define PCI_PRIV_SHARE_NICCTRL         0xF5
+#define JME_FLAG_PHYEA_ENABLE          0x2
+
 /*
  * Wakeup Frame setup interface registers
  */

@@ -58,10 +58,8 @@
 
 
 #define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
 #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
 #else /* all other page sizes */
 #define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
     struct ob_mac_iocb_req *queue_entry;
     u32 index;
     struct oal oal;
-    struct map_list map[MAX_SKB_FRAGS + 1];
+    struct map_list map[MAX_SKB_FRAGS + 2];
     int map_cnt;
     struct tx_ring_desc *next;
 };

@@ -781,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
     unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-    /* Do not manage MMC IRQ (FIXME) */
+    /* Mask MMC irq, counters are managed in SW and registers
+     * are cleared on each READ eventually. */
     dwmac_mmc_intr_all_mask(priv->ioaddr);
-    dwmac_mmc_ctrl(priv->ioaddr, mode);
-    memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+    if (priv->dma_cap.rmon) {
+        dwmac_mmc_ctrl(priv->ioaddr, mode);
+        memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+    } else
+        pr_info(" No MAC Management Counters available");
 }
 
 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -1012,8 +1017,7 @@ static int stmmac_open(struct net_device *dev)
     memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
     priv->xstats.threshold = tc;
 
-    if (priv->dma_cap.rmon)
-        stmmac_mmc_setup(priv);
+    stmmac_mmc_setup(priv);
 
     /* Start the ball rolling... */
     DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);

@@ -3,7 +3,7 @@
 #
 
 menuconfig PHYLIB
-    bool "PHY Device support and infrastructure"
+    tristate "PHY Device support and infrastructure"
     depends on !S390
     depends on NETDEVICES
     help

@@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
 
     WARN_ON(priv->fw_state != FW_STATE_READY);
 
-    cancel_work_sync(&priv->work);
-
     p54spi_power_off(priv);
     spin_lock_irqsave(&priv->tx_lock, flags);
     INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
 
     priv->fw_state = FW_STATE_OFF;
     mutex_unlock(&priv->mutex);
+
+    cancel_work_sync(&priv->work);
 }
 
 static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
     init_completion(&priv->fw_comp);
     INIT_LIST_HEAD(&priv->tx_pending);
     mutex_init(&priv->mutex);
+    spin_lock_init(&priv->tx_lock);
     SET_IEEE80211_DEV(hw, &spi->dev);
     priv->common.open = p54spi_op_start;
     priv->common.stop = p54spi_op_stop;

@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
         dwrq->flags = 0;
         dwrq->length = 0;
     }
-    essid->octets[essid->length] = '\0';
+    essid->octets[dwrq->length] = '\0';
     memcpy(extra, essid->octets, dwrq->length);
     kfree(essid);
 

@@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
     /* Apparently the data is read from end to start */
     rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
     /* The returned value is in CPU order, but eeprom is le */
-    rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+    *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
     rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
     *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
     rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);

@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
     if (mac->link_state != MAC80211_LINKED)
         return;
 
-    spin_lock(&rtlpriv->locks.lps_lock);
+    spin_lock_irq(&rtlpriv->locks.lps_lock);
 
     /* Idle for a while if we connect to AP a while ago. */
     if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
         }
     }
 
-    spin_unlock(&rtlpriv->locks.lps_lock);
+    spin_unlock_irq(&rtlpriv->locks.lps_lock);
 }
 
 /*Leave the leisure power save mode.*/
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
     struct rtl_priv *rtlpriv = rtl_priv(hw);
     struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
     struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+    unsigned long flags;
 
-    spin_lock(&rtlpriv->locks.lps_lock);
+    spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
 
     if (ppsc->fwctrl_lps) {
         if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
                 rtl_lps_set_psmode(hw, EACTIVE);
         }
     }
-    spin_unlock(&rtlpriv->locks.lps_lock);
+    spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
 }
 
 /* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
         RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
     }
 
-    spin_lock(&rtlpriv->locks.lps_lock);
+    spin_lock_irq(&rtlpriv->locks.lps_lock);
     rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
-    spin_unlock(&rtlpriv->locks.lps_lock);
+    spin_unlock_irq(&rtlpriv->locks.lps_lock);
 }
 
 void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
     if (rtlpriv->link_info.busytraffic)
         return;
 
-    spin_lock(&rtlpriv->locks.lps_lock);
+    spin_lock_irq(&rtlpriv->locks.lps_lock);
     rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
-    spin_unlock(&rtlpriv->locks.lps_lock);
+    spin_unlock_irq(&rtlpriv->locks.lps_lock);
 
     if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
         !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {

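Every plain spin_lock() on lps_lock above becomes an irq-disabling variant: _irq where the path is known to run with interrupts enabled, _irqsave where the caller's interrupt state is unknown. The rule the patch applies, as a sketch in comment form (not kernel text):

    /*
     * spin_lock(&l)            - lock never taken from irq context
     * spin_lock_irq(&l)        - irqs known enabled here; disable them,
     *                            re-enable unconditionally on unlock
     * spin_lock_irqsave(&l, f) - irq state unknown; save it into f,
     *                            restore it on unlock
     *
     * Taking the plain variant on a lock that is also acquired from irq
     * context can deadlock: the interrupt arrives on the same CPU while
     * the lock is held and spins forever.
     */
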
@@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
         pending_idx = *((u16 *)skb->data);
         xen_netbk_idx_release(netbk, pending_idx);
         for (j = start; j < i; j++) {
-            pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+            pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
             xen_netbk_idx_release(netbk, pending_idx);
         }
 

@@ -2536,6 +2536,8 @@ extern void net_disable_timestamp(void);
 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+                            const struct seq_operations *ops);
 #endif
 
 extern int netdev_class_create_file(struct class_attribute *class_attr);

@@ -30,7 +30,7 @@
  */
 
 struct tc_stats {
-    __u64   bytes;      /* NUmber of enqueues bytes */
+    __u64   bytes;      /* Number of enqueued bytes */
     __u32   packets;    /* Number of enqueued packets */
     __u32   drops;      /* Packets dropped because of lack of resources */
     __u32   overlimits; /* Number of throttle events when this
@@ -297,7 +297,7 @@ struct tc_htb_glob {
     __u32 debug;        /* debug flags */
 
     /* stats */
-    __u32 direct_pkts;  /* count of non shapped packets */
+    __u32 direct_pkts;  /* count of non shaped packets */
 };
 enum {
     TCA_HTB_UNSPEC,
@@ -503,7 +503,7 @@ enum {
 };
 #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
 
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
 struct tc_netem_gimodel {
     __u32   p13;
     __u32   p31;

@@ -205,12 +205,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
 
 static inline u32 dst_mtu(const struct dst_entry *dst)
 {
-    u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
-    if (!mtu)
-        mtu = dst->ops->default_mtu(dst);
-
-    return mtu;
+    return dst->ops->mtu(dst);
 }
 
 /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */

@@ -17,7 +17,7 @@ struct dst_ops {
     int                 (*gc)(struct dst_ops *ops);
     struct dst_entry *  (*check)(struct dst_entry *, __u32 cookie);
     unsigned int        (*default_advmss)(const struct dst_entry *);
-    unsigned int        (*default_mtu)(const struct dst_entry *);
+    unsigned int        (*mtu)(const struct dst_entry *);
     u32 *               (*cow_metrics)(struct dst_entry *, unsigned long);
     void                (*destroy)(struct dst_entry *);
     void                (*ifdown)(struct dst_entry *,

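The dst changes above fold the old two-step policy (read the RTAX_MTU metric, fall back to ->default_mtu() when it is zero) into a single ->mtu() operation that each protocol implements; dst_mtu() becomes one indirect call. Most implementations in this series take the GNU C "x ? : y" shape, which evaluates x once and falls back to y when x is zero. An illustrative stand-alone version (names are mine, not the kernel's):

    /* Sketch of the shape each new ->mtu() op takes. */
    static unsigned int mtu_or_device_default(unsigned int metric_mtu,
                                              unsigned int dev_mtu)
    {
        /* GNU extension: a ? : b  ==  a ? a : b, with a evaluated once */
        return metric_mtu ? : dev_mtu;
    }
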
@@ -31,6 +31,7 @@
 /** struct ip_options - IP Options
  *
  * @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
  * @is_data - Options in __data, rather than skb
  * @is_strictroute - Strict source route
  * @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
  */
 struct ip_options {
     __be32          faddr;
+    __be32          nexthop;
     unsigned char   optlen;
     unsigned char   srr;
     unsigned char   rr;

@@ -35,6 +35,7 @@ struct inet_peer {
 
     u32             metrics[RTAX_MAX];
     u32             rate_tokens;    /* rate limiting for ICMP */
+    int             redirect_genid;
     unsigned long   rate_last;
     unsigned long   pmtu_expires;
     u32             pmtu_orig;

@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
     int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
 
 extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 {
+    struct net *net = nf_ct_net(ct);
     struct nf_conntrack_ecache *e;
 
-    if (nf_conntrack_event_cb == NULL)
+    if (net->ct.nf_conntrack_event_cb == NULL)
         return;
 
     e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
                               int report)
 {
     int ret = 0;
+    struct net *net = nf_ct_net(ct);
     struct nf_ct_event_notifier *notify;
     struct nf_conntrack_ecache *e;
 
     rcu_read_lock();
-    notify = rcu_dereference(nf_conntrack_event_cb);
+    notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
     if (notify == NULL)
         goto out_unlock;
 
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
     int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
 
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
                           u32 pid,
                           int report)
 {
+    struct net *net = nf_ct_exp_net(exp);
     struct nf_exp_event_notifier *notify;
     struct nf_conntrack_ecache *e;
 
     rcu_read_lock();
-    notify = rcu_dereference(nf_expect_event_cb);
+    notify = rcu_dereference(net->ct.nf_expect_event_cb);
     if (notify == NULL)
         goto out_unlock;
 

@@ -18,6 +18,8 @@ struct netns_ct {
     struct hlist_nulls_head     unconfirmed;
     struct hlist_nulls_head     dying;
     struct ip_conntrack_stat __percpu *stat;
+    struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+    struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
     int                         sysctl_events;
     unsigned int                sysctl_events_retry_timeout;
     int                         sysctl_acct;

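The conntrack hunks above move the event-notifier pointers from file-scope globals into struct netns_ct, so registration and lookup are both keyed by a struct net and namespaces no longer share one callback. A compilable miniature of the global-to-per-namespace move (all types here are stand-ins, not the kernel's):

    struct my_notifier {
        int (*fcn)(unsigned int events, void *item);
    };

    struct my_netns_ct {
        /* was: one file-scope global shared by every namespace */
        struct my_notifier *event_cb;
    };

    /* Registration now names the namespace it applies to. */
    static int my_register_notifier(struct my_netns_ct *ct,
                                    struct my_notifier *nb)
    {
        if (ct->event_cb)
            return -1; /* stand-in for -EBUSY */
        ct->event_cb = nb;
        return 0;
    }
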
@@ -116,7 +116,7 @@ struct red_parms {
     u32         qR;         /* Cached random number */
 
     unsigned long   qavg;       /* Average queue length: A scaled */
-    psched_time_t   qidlestart; /* Start of current idle period */
+    ktime_t         qidlestart; /* Start of current idle period */
 };
 
 static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
 
 static inline int red_is_idling(struct red_parms *p)
 {
-    return p->qidlestart != PSCHED_PASTPERFECT;
+    return p->qidlestart.tv64 != 0;
 }
 
 static inline void red_start_of_idle_period(struct red_parms *p)
 {
-    p->qidlestart = psched_get_time();
+    p->qidlestart = ktime_get();
 }
 
 static inline void red_end_of_idle_period(struct red_parms *p)
 {
-    p->qidlestart = PSCHED_PASTPERFECT;
+    p->qidlestart.tv64 = 0;
 }
 
 static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
 
 static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
 {
-    psched_time_t now;
-    long us_idle;
+    s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+    long us_idle = min_t(s64, delta, p->Scell_max);
     int shift;
 
-    now = psched_get_time();
-    us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
     /*
      * The problem: ideally, average length queue recalcultion should
      * be done over constant clock intervals. This is too expensive, so

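The red.h hunks replace psched tick arithmetic with ktime: qidlestart becomes a ktime_t, tv64 == 0 is the "not idling" sentinel, and the idle period is measured with ktime_us_delta() then clamped to Scell_max. A userspace model of the new computation (a plain 64-bit nanosecond clock stands in for ktime_t):

    #include <stdint.h>

    /* Microseconds of idle time, bounded by scell_max, from a 64-bit
     * nanosecond clock -- the shape of red_calc_qavg_from_idle_time(). */
    static long idle_us_bounded(int64_t now_ns, int64_t qidlestart_ns,
                                long scell_max)
    {
        int64_t us = (now_ns - qidlestart_ns) / 1000;

        return us < scell_max ? (long)us : scell_max;
    }
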
@@ -71,12 +71,12 @@ struct rtable {
     struct fib_info     *fi; /* for client ref to shared metrics */
 };
 
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
 {
     return rt->rt_route_iif != 0;
 }
 
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
 {
     return rt->rt_route_iif == 0;
 }

@@ -18,6 +18,7 @@
 #include <net/sock.h>
 
 #include "br_private.h"
+#include "br_private_stp.h"
 
 static inline size_t br_nlmsg_size(void)
 {
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
     p->state = new_state;
     br_log_state(p);
+
+    spin_lock_bh(&p->br->lock);
+    br_port_state_selection(p->br);
+    spin_unlock_bh(&p->br->lock);
+
     br_ifinfo_notify(RTM_NEWLINK, p);
 
     return 0;

@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
     struct net_bridge_port *p;
     unsigned int liveports = 0;
 
-    /* Don't change port states if userspace is handling STP */
-    if (br->stp_enabled == BR_USER_STP)
-        return;
-
     list_for_each_entry(p, &br->port_list, list) {
         if (p->state == BR_STATE_DISABLED)
             continue;
 
-        if (p->port_no == br->root_port) {
-            p->config_pending = 0;
-            p->topology_change_ack = 0;
-            br_make_forwarding(p);
-        } else if (br_is_designated_port(p)) {
-            del_timer(&p->message_age_timer);
-            br_make_forwarding(p);
-        } else {
-            p->config_pending = 0;
-            p->topology_change_ack = 0;
-            br_make_blocking(p);
+        /* Don't change port states if userspace is handling STP */
+        if (br->stp_enabled != BR_USER_STP) {
+            if (p->port_no == br->root_port) {
+                p->config_pending = 0;
+                p->topology_change_ack = 0;
+                br_make_forwarding(p);
+            } else if (br_is_designated_port(p)) {
+                del_timer(&p->message_age_timer);
+                br_make_forwarding(p);
+            } else {
+                p->config_pending = 0;
+                p->topology_change_ack = 0;
+                br_make_blocking(p);
+            }
         }
 
         if (p->state == BR_STATE_FORWARDING)

@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
-    int tmp;
     u16 chks;
     u16 len;
+    __le16 data;
+
     struct cffrml *this = container_obj(layr);
     if (this->dofcs) {
         chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
-        tmp = cpu_to_le16(chks);
-        cfpkt_add_trail(pkt, &tmp, 2);
+        data = cpu_to_le16(chks);
+        cfpkt_add_trail(pkt, &data, 2);
     } else {
         cfpkt_pad_trail(pkt, 2);
     }
     len = cfpkt_getlen(pkt);
-    tmp = cpu_to_le16(len);
-    cfpkt_add_head(pkt, &tmp, 2);
+    data = cpu_to_le16(len);
+    cfpkt_add_head(pkt, &data, 2);
     cfpkt_info(pkt)->hdr_len += 2;
     if (cfpkt_erroneous(pkt)) {
         pr_err("Packet is erroneous!\n");

@@ -1396,7 +1396,7 @@ rollback:
     for_each_net(net) {
         for_each_netdev(net, dev) {
             if (dev == last)
-                break;
+                goto outroll;
 
             if (dev->flags & IFF_UP) {
                 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1407,7 @@ rollback:
         }
     }
 
+outroll:
     raw_notifier_chain_unregister(&netdev_chain, nb);
     goto unlock;
 }
@@ -4282,6 +4283,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
                         sizeof(struct dev_iter_state));
 }
 
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+                     const struct seq_operations *ops)
+{
+    return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
 static const struct file_operations dev_seq_fops = {
     .owner   = THIS_MODULE,
     .open    = dev_seq_open,

@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-    return seq_open_net(inode, file, &dev_mc_seq_ops,
-                        sizeof(struct seq_net_private));
+    return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
 }
 
 static const struct file_operations dev_mc_seq_fops = {

@@ -2397,7 +2397,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
     struct net *net = seq_file_net(seq);
     struct neigh_table *tbl = state->tbl;
 
-    pn = pn->next;
+    do {
+        pn = pn->next;
+    } while (pn && !net_eq(pneigh_net(pn), net));
+
     while (!pn) {
         if (++state->bucket > PNEIGH_HASHMASK)
             break;

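The pneigh fix above turns a single pointer advance into a do/while so that proxy entries belonging to other network namespaces are skipped instead of leaking into this namespace's /proc output. The generic form of the idiom (stand-in types):

    struct node {
        struct node *next;
        int ns_id;
    };

    /* Advance at least once, then keep advancing while the current
     * entry fails the namespace predicate. */
    static struct node *next_in_ns(struct node *n, int ns_id)
    {
        do {
            n = n->next;
        } while (n && n->ns_id != ns_id);
        return n;
    }
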
@@ -2230,7 +2230,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
  * @shiftlen: shift up to this many bytes
  *
  * Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns number bytes shifted.
  * It's up to caller to free skb if everything was shifted.
  *
  * If @tgt runs out of frags, the whole operation is aborted.

@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                            inet->inet_sport, inet->inet_dport, sk);
     if (IS_ERR(rt)) {
         err = PTR_ERR(rt);
+        rt = NULL;
         goto failure;
     }

@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
 static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
 static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
     .gc = dn_dst_gc,
     .check = dn_dst_check,
     .default_advmss = dn_dst_default_advmss,
-    .default_mtu = dn_dst_default_mtu,
+    .mtu = dn_dst_mtu,
     .cow_metrics = dst_cow_metrics_generic,
     .destroy = dn_dst_destroy,
     .negative_advice = dn_dst_negative_advice,
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
     return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
 }
 
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
 {
-    return dst->dev->mtu;
+    unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+    return mtu ? : dst->dev->mtu;
 }
 
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)

@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
 
 void dn_start_slow_timer(struct sock *sk)
 {
-    sk->sk_timer.expires  = jiffies + SLOW_INTERVAL;
-    sk->sk_timer.function = dn_slow_timer;
-    sk->sk_timer.data     = (unsigned long)sk;
-
-    add_timer(&sk->sk_timer);
+    setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+    sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 }
 
 void dn_stop_slow_timer(struct sock *sk)
 {
-    del_timer(&sk->sk_timer);
+    sk_stop_timer(sk, &sk->sk_timer);
 }
 
 static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
     struct sock *sk = (struct sock *)arg;
     struct dn_scp *scp = DN_SK(sk);
 
-    sock_hold(sk);
     bh_lock_sock(sk);
 
     if (sock_owned_by_user(sk)) {
-        sk->sk_timer.expires = jiffies + HZ / 10;
-        add_timer(&sk->sk_timer);
+        sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
         goto out;
     }
 
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
             scp->keepalive_fxn(sk);
     }
 
-    sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
-    add_timer(&sk->sk_timer);
+    sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 out:
     bh_unlock_sock(sk);
     sock_put(sk);

@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
                              void __user *buffer,
                              size_t *lenp, loff_t *ppos)
 {
+    int old_value = *(int *)ctl->data;
     int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+    int new_value = *(int *)ctl->data;
 
     if (write) {
         struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 
         if (cnf == net->ipv4.devconf_dflt)
             devinet_copy_dflt_conf(net, i);
+        if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+            if ((new_value == 0) && (old_value != 0))
+                rt_cache_flush(net, 0);
     }
 
     return ret;

@@ -1716,7 +1716,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
     if (err) {
         int j;
 
-        pmc->sfcount[sfmode]--;
+        if (!delta)
+            pmc->sfcount[sfmode]--;
         for (j=0; j<i; j++)
             (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
     } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {

@@ -108,9 +108,6 @@ static int inet_csk_diag_fill(struct sock *sk,
                               icsk->icsk_ca_ops->name);
     }
 
-    if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
-        RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
     r->idiag_family = sk->sk_family;
     r->idiag_state = sk->sk_state;
     r->idiag_timer = 0;
@@ -125,16 +122,23 @@ static int inet_csk_diag_fill(struct sock *sk,
     r->id.idiag_src[0] = inet->inet_rcv_saddr;
     r->id.idiag_dst[0] = inet->inet_daddr;
 
+    /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+     * hence this needs to be included regardless of socket family.
+     */
+    if (ext & (1 << (INET_DIAG_TOS - 1)))
+        RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
     if (r->idiag_family == AF_INET6) {
         const struct ipv6_pinfo *np = inet6_sk(sk);
 
-        if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-            RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
-
         ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
                        &np->rcv_saddr);
         ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
                        &np->daddr);
+
+        if (ext & (1 << (INET_DIAG_TCLASS - 1)))
+            RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
     }
 #endif
 

@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
     rt = skb_rtable(skb);
 
-    if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+    if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
         goto sr_failed;
 
     if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&

@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
              ) {
             if (srrptr + 3 > srrspace)
                 break;
-            if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+            if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
                 break;
         }
         if (srrptr + 3 <= srrspace) {
             opt->is_changed = 1;
             ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+            ip_hdr(skb)->daddr = opt->nexthop;
             optptr[2] = srrptr+4;
         } else if (net_ratelimit())
             printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,7 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
     }
     if (srrptr <= srrspace) {
         opt->srr_is_hit = 1;
-        iph->daddr = nexthop;
+        opt->nexthop = nexthop;
         opt->is_changed = 1;
     }
     return 0;

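The ip_options hunks above stop rewriting the packet's destination address when a source-route option is parsed; the pending hop is parked in opt->nexthop and only written into the header at forwarding time, so the original daddr survives the route lookup. The shape of the fix, with stand-in types:

    struct hdr  { unsigned int daddr; };
    struct opts { unsigned int nexthop; int srr_is_hit; int is_changed; };

    static void record_srr_hop(struct hdr *iph, struct opts *opt,
                               unsigned int nexthop)
    {
        (void)iph;              /* header deliberately left untouched */
        opt->nexthop = nexthop; /* consumed later, at forward time */
        opt->srr_is_hit = 1;
        opt->is_changed = 1;
    }
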
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
     /* Change in oif may mean change in hh_len. */
     hh_len = skb_dst(skb)->dev->hard_header_len;
     if (skb_headroom(skb) < hh_len &&
-        pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+        pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+                         0, GFP_ATOMIC))
         return -1;
 
     return 0;

@@ -325,7 +325,6 @@ config IP_NF_TARGET_TTL
 # raw + specific targets
 config IP_NF_RAW
     tristate 'raw table support (required for NOTRACK/TRACE)'
-    depends on NETFILTER_ADVANCED
     help
       This option adds a `raw' table to iptables. This table is the very
       first in the netfilter framework and hooks in at the PREROUTING

@@ -131,6 +131,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly    = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly  = 256;
 static int rt_chain_length_max __read_mostly = 20;
+static int redirect_genid;
 
 /*
  *  Interface to generic destination cache.
@@ -138,7 +139,7 @@ static int rt_chain_length_max __read_mostly = 20;
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst);
+static unsigned int ipv4_mtu(const struct dst_entry *dst);
 static void ipv4_dst_destroy(struct dst_entry *dst);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void ipv4_link_failure(struct sk_buff *skb);
@@ -193,7 +194,7 @@ static struct dst_ops ipv4_dst_ops = {
     .gc = rt_garbage_collect,
     .check = ipv4_dst_check,
     .default_advmss = ipv4_default_advmss,
-    .default_mtu = ipv4_default_mtu,
+    .mtu = ipv4_mtu,
     .cow_metrics = ipv4_cow_metrics,
     .destroy = ipv4_dst_destroy,
     .ifdown = ipv4_dst_ifdown,
@@ -416,9 +417,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
     else {
         struct rtable *r = v;
         struct neighbour *n;
-        int len;
+        int len, HHUptod;
 
+        rcu_read_lock();
+        n = dst_get_neighbour(&r->dst);
+        HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+        rcu_read_unlock();
+
         seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
             "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
             r->dst.dev ? r->dst.dev->name : "*",
@@ -432,7 +437,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                 dst_metric(&r->dst, RTAX_RTTVAR)),
             r->rt_key_tos,
             -1,
-            (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+            HHUptod,
             r->rt_spec_dst, &len);
 
         seq_printf(seq, "%*s\n", 127 - len, "");
@@ -837,6 +842,7 @@ static void rt_cache_invalidate(struct net *net)
 
     get_random_bytes(&shuffle, sizeof(shuffle));
     atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+    redirect_genid++;
 }
 
 /*
@@ -1391,8 +1397,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
     peer = rt->peer;
     if (peer) {
-        if (peer->redirect_learned.a4 != new_gw) {
+        if (peer->redirect_learned.a4 != new_gw ||
+            peer->redirect_genid != redirect_genid) {
             peer->redirect_learned.a4 = new_gw;
+            peer->redirect_genid = redirect_genid;
             atomic_inc(&__rt_peer_genid);
         }
         check_peer_redir(&rt->dst, peer);
@@ -1685,12 +1693,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 }
 
 
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static struct rtable *ipv4_validate_peer(struct rtable *rt)
 {
-    struct rtable *rt = (struct rtable *) dst;
-
-    if (rt_is_expired(rt))
-        return NULL;
     if (rt->rt_peer_genid != rt_peer_genid()) {
         struct inet_peer *peer;
 
@@ -1699,17 +1703,29 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
         peer = rt->peer;
         if (peer) {
-            check_peer_pmtu(dst, peer);
+            check_peer_pmtu(&rt->dst, peer);
 
+            if (peer->redirect_genid != redirect_genid)
+                peer->redirect_learned.a4 = 0;
             if (peer->redirect_learned.a4 &&
                 peer->redirect_learned.a4 != rt->rt_gateway) {
-                if (check_peer_redir(dst, peer))
+                if (check_peer_redir(&rt->dst, peer))
                     return NULL;
             }
         }
 
         rt->rt_peer_genid = rt_peer_genid();
     }
+    return rt;
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+    struct rtable *rt = (struct rtable *) dst;
+
+    if (rt_is_expired(rt))
+        return NULL;
+    dst = (struct dst_entry *) ipv4_validate_peer(rt);
+    return dst;
+}
 
@@ -1814,12 +1830,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
     return advmss;
 }
 
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
 {
-    unsigned int mtu = dst->dev->mtu;
+    const struct rtable *rt = (const struct rtable *) dst;
+    unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+    if (mtu && rt_is_output_route(rt))
+        return mtu;
+
+    mtu = dst->dev->mtu;
 
     if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-        const struct rtable *rt = (const struct rtable *) dst;
 
         if (rt->rt_gateway != rt->rt_dst && mtu > 576)
             mtu = 576;
@@ -1852,6 +1873,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
         dst_init_metrics(&rt->dst, peer->metrics, false);
 
         check_peer_pmtu(&rt->dst, peer);
+        if (peer->redirect_genid != redirect_genid)
+            peer->redirect_learned.a4 = 0;
         if (peer->redirect_learned.a4 &&
             peer->redirect_learned.a4 != rt->rt_gateway) {
             rt->rt_gateway = peer->redirect_learned.a4;
@@ -2357,6 +2380,9 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
             rth->rt_mark == skb->mark &&
             net_eq(dev_net(rth->dst.dev), net) &&
             !rt_is_expired(rth)) {
+            rth = ipv4_validate_peer(rth);
+            if (!rth)
+                continue;
             if (noref) {
                 dst_use_noref(&rth->dst, jiffies);
                 skb_dst_set_noref(skb, &rth->dst);
@@ -2732,6 +2758,9 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
             (IPTOS_RT_MASK | RTO_ONLINK)) &&
             net_eq(dev_net(rth->dst.dev), net) &&
             !rt_is_expired(rth)) {
+            rth = ipv4_validate_peer(rth);
+            if (!rth)
+                continue;
             dst_use(&rth->dst, jiffies);
             RT_CACHE_STAT_INC(out_hit);
             rcu_read_unlock_bh();
@@ -2755,9 +2784,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
     return NULL;
 }
 
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
 {
-    return 0;
+    unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+    return mtu ? : dst->dev->mtu;
 }
 
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2775,7 +2806,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
     .protocol       = cpu_to_be16(ETH_P_IP),
    .destroy        = ipv4_dst_destroy,
     .check          = ipv4_blackhole_dst_check,
-    .default_mtu    = ipv4_blackhole_default_mtu,
+    .mtu            = ipv4_blackhole_mtu,
     .default_advmss = ipv4_default_advmss,
     .update_pmtu    = ipv4_rt_blackhole_update_pmtu,
     .cow_metrics    = ipv4_rt_blackhole_cow_metrics,

@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
     struct inet_sock *inet = inet_sk(sk);
     struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
     struct sk_buff *skb;
-    unsigned int ulen;
+    unsigned int ulen, copied;
     int peeked;
     int err;
     int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1186,10 @@ try_again:
         goto out;
 
     ulen = skb->len - sizeof(struct udphdr);
-    if (len > ulen)
-        len = ulen;
-    else if (len < ulen)
+    copied = len;
+    if (copied > ulen)
+        copied = ulen;
+    else if (copied < ulen)
         msg->msg_flags |= MSG_TRUNC;
 
     /*
@@ -1197,14 +1198,14 @@ try_again:
      * coverage checksum (UDP-Lite), do it before the copy.
      */
 
-    if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+    if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
         if (udp_lib_checksum_complete(skb))
             goto csum_copy_err;
     }
 
     if (skb_csum_unnecessary(skb))
         err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                      msg->msg_iov, len);
+                                      msg->msg_iov, copied);
     else {
         err = skb_copy_and_csum_datagram_iovec(skb,
                                                sizeof(struct udphdr),
@@ -1233,7 +1234,7 @@ try_again:
     if (inet->cmsg_flags)
         ip_cmsg_recv(msg, skb);
 
-    err = len;
+    err = copied;
     if (flags & MSG_TRUNC)
         err = ulen;
 

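The udp_recvmsg revert reintroduces a separate copied variable so the caller's len is never overwritten: truncation tests and the return value use copied, while MSG_TRUNC still reports the full datagram length ulen. The bookkeeping in isolation:

    #include <stddef.h>

    /* User asked for len bytes; the datagram holds ulen. Returns how
     * many bytes actually land in the buffer, flagging truncation. */
    static size_t clamp_copied(size_t len, size_t ulen, int *truncated)
    {
        size_t copied = len;

        if (copied > ulen)
            copied = ulen;  /* datagram shorter than the buffer */
        else if (copied < ulen)
            *truncated = 1; /* MSG_TRUNC: tail was dropped */
        return copied;
    }
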
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
  * request_sock (formerly open request) hash tables.
  */
 static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
-                           const u32 rnd, const u16 synq_hsize)
+                           const u32 rnd, const u32 synq_hsize)
 {
     u32 c;
 

@@ -503,7 +503,7 @@ done:
             goto e_inval;
         if (val > 255 || val < -1)
             goto e_inval;
-        np->mcast_hops = val;
+        np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
         retv = 0;
         break;
 

@@ -1571,7 +1571,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
     }
     if (!rt->rt6i_peer)
         rt6_bind_peer(rt, 1);
-    if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+    if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
         goto release;
 
     if (dev->addr_len) {

@@ -186,7 +186,6 @@ config IP6_NF_MANGLE
 
 config IP6_NF_RAW
     tristate 'raw table support (required for TRACE)'
-    depends on NETFILTER_ADVANCED
     help
       This option adds a `raw' table to ip6tables. This table is the very
       first in the netfilter framework and hooks in at the PREROUTING

@@ -77,7 +77,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                                     const struct in6_addr *dest);
 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
-static unsigned int ip6_default_mtu(const struct dst_entry *dst);
+static unsigned int ip6_mtu(const struct dst_entry *dst);
 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
 static void ip6_dst_destroy(struct dst_entry *);
 static void ip6_dst_ifdown(struct dst_entry *,
@@ -144,7 +144,7 @@ static struct dst_ops ip6_dst_ops_template = {
     .gc_thresh      = 1024,
     .check          = ip6_dst_check,
     .default_advmss = ip6_default_advmss,
-    .default_mtu    = ip6_default_mtu,
+    .mtu            = ip6_mtu,
     .cow_metrics    = ipv6_cow_metrics,
     .destroy        = ip6_dst_destroy,
     .ifdown         = ip6_dst_ifdown,
@@ -155,9 +155,11 @@ static struct dst_ops ip6_dst_ops_template = {
     .neigh_lookup   = ip6_neigh_lookup,
 };
 
-static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
 {
-    return 0;
+    unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+    return mtu ? : dst->dev->mtu;
 }
 
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -175,7 +177,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
     .protocol       = cpu_to_be16(ETH_P_IPV6),
     .destroy        = ip6_dst_destroy,
     .check          = ip6_dst_check,
-    .default_mtu    = ip6_blackhole_default_mtu,
+    .mtu            = ip6_blackhole_mtu,
     .default_advmss = ip6_default_advmss,
     .update_pmtu    = ip6_rt_blackhole_update_pmtu,
     .cow_metrics    = ip6_rt_blackhole_cow_metrics,
@@ -1041,10 +1043,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
     return mtu;
 }
 
-static unsigned int ip6_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_mtu(const struct dst_entry *dst)
 {
-    unsigned int mtu = IPV6_MIN_MTU;
-    struct inet6_dev *idev;
+    struct inet6_dev *idev;
+    unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+    if (mtu)
+        return mtu;
+
+    mtu = IPV6_MIN_MTU;
 
     rcu_read_lock();
     idev = __in6_dev_get(dst->dev);

@@ -1255,6 +1255,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
     if (!want_cookie || tmp_opt.tstamp_ok)
         TCP_ECN_create_request(req, tcp_hdr(skb));
 
+    treq->iif = sk->sk_bound_dev_if;
+
+    /* So that link locals have meaning */
+    if (!sk->sk_bound_dev_if &&
+        ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+        treq->iif = inet6_iif(skb);
+
     if (!isn) {
         struct inet_peer *peer = NULL;
 
@@ -1264,12 +1271,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
             atomic_inc(&skb->users);
             treq->pktopts = skb;
         }
-        treq->iif = sk->sk_bound_dev_if;
-
-        /* So that link locals have meaning */
-        if (!sk->sk_bound_dev_if &&
-            ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-            treq->iif = inet6_iif(skb);
 
         if (want_cookie) {
             isn = cookie_v6_init_sequence(sk, skb, &req->mss);

@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen;
unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);

@@ -363,9 +363,10 @@ try_again:
goto out;

ulen = skb->len - sizeof(struct udphdr);
if (len > ulen)
len = ulen;
else if (len < ulen)
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;

is_udp4 = (skb->protocol == htons(ETH_P_IP));

@@ -376,14 +377,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/

if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}

if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
msg->msg_iov,len);
msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)

@@ -432,7 +433,7 @@ try_again:
datagram_recv_ctl(sk, msg, skb);
}

err = len;
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
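
Keeping a separate `copied` alongside the caller's `len` is what makes the accounting right: the partial-checksum decision and the copy use the clamped `copied`, while MSG_TRUNC reporting still refers to the full datagram length `ulen`. A rough standalone model of that receive-length logic (simplified, not the kernel path):

    #include <stdio.h>
    #include <string.h>

    #define MSG_TRUNC_FLAG 0x20 /* stand-in for MSG_TRUNC */

    /* Copy a datagram of ulen bytes into a len-byte buffer and
     * return the length reported to the caller. */
    static int recv_datagram(const char *data, unsigned int ulen,
                             char *buf, unsigned int len,
                             int flags, int *msg_flags)
    {
        unsigned int copied = len;

        if (copied > ulen)
            copied = ulen;
        else if (copied < ulen)
            *msg_flags |= MSG_TRUNC_FLAG; /* datagram was truncated */

        memcpy(buf, data, copied);

        /* MSG_TRUNC asks for the real datagram size, not the copied size */
        return (flags & MSG_TRUNC_FLAG) ? (int)ulen : (int)copied;
    }

    int main(void)
    {
        char buf[4];
        int mflags = 0;
        int ret = recv_datagram("abcdefgh", 8, buf, sizeof(buf), 0, &mflags);

        printf("ret=%d truncated=%d\n", ret, !!(mflags & MSG_TRUNC_FLAG));
        return 0;
    }
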
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len

/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));

inet = inet_sk(sk);
fl = &inet->cork.fl;
@@ -161,6 +161,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return -ENOENT;
}

/* if we're already stopping ignore any new requests to stop */
if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
spin_unlock_bh(&sta->lock);
return -EALREADY;
}

if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/* not even started yet! */
ieee80211_assign_tid_tx(sta, tid, NULL);

@@ -169,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return 0;
}

set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

spin_unlock_bh(&sta->lock);

#ifdef CONFIG_MAC80211_HT_DEBUG

@@ -176,8 +184,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

del_timer_sync(&tid_tx->addba_resp_timer);

/*

@@ -187,6 +193,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
*/
clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

/*
* There might be a few packets being processed right now (on
* another CPU) that have already gotten past the aggregation
* check when it was still OPERATIONAL and consequently have
* IEEE80211_TX_CTL_AMPDU set. In that case, this code might
* call into the driver at the same time or even before the
* TX paths calls into it, which could confuse the driver.
*
* Wait for all currently running TX paths to finish before
* telling the driver. New packets will not go through since
* the aggregation session is no longer OPERATIONAL.
*/
synchronize_net();

tid_tx->stop_initiator = initiator;
tid_tx->tx_stop = tx;

@@ -757,11 +777,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
goto out;
}

del_timer(&tid_tx->addba_resp_timer);
del_timer_sync(&tid_tx->addba_resp_timer);

#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif

/*
* addba_resp_timer may have fired before we got here, and
* caused WANT_STOP to be set. If the stop then was already
* processed further, STOPPING might be set.
*/
if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG
"got addBA resp for tid %d but we already gave up\n",
tid);
#endif
goto out;
}

/*
* IEEE 802.11-2007 7.3.1.14:
* In an ADDBA Response frame, when the Status Code field
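
The ordering here is the whole fix: STOPPING is set before the lock is dropped, so a racing stop request sees -EALREADY, and synchronize_net() then waits out TX paths that passed the OPERATIONAL check earlier. A loose userspace model of that "flip the state, then drain in-flight readers" shape, using C11 atomics (illustrative only; the kernel uses RCU, not a spin loop):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool operational = true;
    static atomic_int in_flight; /* TX paths currently inside the driver */

    /* A TX path marks itself busy first, then re-checks the state. */
    static void tx_path(void)
    {
        atomic_fetch_add(&in_flight, 1);
        if (atomic_load(&operational)) {
            /* ... hand the frame to the driver ... */
        }
        atomic_fetch_sub(&in_flight, 1);
    }

    /* Stopping flips the state first, then drains the old readers. */
    static void stop_session(void)
    {
        atomic_store(&operational, false); /* like clearing OPERATIONAL */
        while (atomic_load(&in_flight) != 0)
            ; /* busy-wait stands in for synchronize_net() */
        /* only now is it safe to tell the driver the session is gone */
    }

    int main(void)
    {
        tx_path();
        stop_session();
        puts("no TX path in flight after stop");
        return 0;
    }
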
@@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,

PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");

PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
"3839 bytes");
PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
"3839 bytes");
PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
"7935 bytes");

/*
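
Bit 11 of the HT capability field selects between the two legal maximum A-MSDU lengths, which is why the dump now prints 3839 bytes when the bit is clear and 7935 bytes when it is set. The same decode in plain C:

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    /* IEEE 802.11 HT capability bit 11: maximum A-MSDU length */
    static unsigned int max_amsdu_len(unsigned int cap)
    {
        return (cap & BIT(11)) ? 7935 : 3839;
    }

    int main(void)
    {
        printf("%u\n", max_amsdu_len(0));       /* bit clear -> 3839 */
        printf("%u\n", max_amsdu_len(BIT(11))); /* bit set   -> 7935 */
        return 0;
    }
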
@@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
__le16 txflags;
u16 txflags;

rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);

@@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
txflags = 0;
if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
!is_multicast_ether_addr(hdr->addr1))
txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;

if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
txflags |= IEEE80211_RADIOTAP_F_TX_RTS;

put_unaligned_le16(txflags, pos);
pos += 2;
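
The radiotap change keeps `txflags` in host byte order while the flag bits are OR-ed in and converts exactly once in put_unaligned_le16(); the old per-flag cpu_to_le16() plus the final conversion would swap twice on big-endian hosts. A small local stand-in for that store helper (a sketch, not the kernel's implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Store a host-order 16-bit value as little-endian, byte by byte,
     * so it works regardless of host endianness or alignment. */
    static void store_le16(uint16_t val, unsigned char *p)
    {
        p[0] = val & 0xff;
        p[1] = val >> 8;
    }

    int main(void)
    {
        uint16_t txflags = 0;
        unsigned char buf[2];

        txflags |= 0x0001; /* an F_TX_FAIL-style flag, kept in host order */
        txflags |= 0x0002; /* an F_TX_CTS-style flag */
        store_le16(txflags, buf);
        printf("%02x %02x\n", buf[0], buf[1]); /* 03 00 on any host */
        return 0;
    }
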
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST

config NF_CONNTRACK_NETBIOS_NS
tristate "NetBIOS name service protocol support"
depends on NETFILTER_ADVANCED
select NF_CONNTRACK_BROADCAST
help
NetBIOS name service requests are sent as broadcast messages from an

@@ -542,7 +541,6 @@ config NETFILTER_XT_TARGET_NOTRACK
tristate '"NOTRACK" target support'
depends on IP_NF_RAW || IP6_NF_RAW
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
help
The NOTRACK target allows a select rule to specify
which packets *not* to enter the conntrack/NAT
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { };
u32 ip, ip_to, p = 0, port, port_to;
u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { };
u32 ip, ip_to, p = 0, port, port_to;
u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
u32 ip, ip_to, p = 0, port, port_to;
u32 ip, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
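
All three ipset hash types get the same one-token fix: `ip_to` is assigned only on the range path, so initializing it to 0 keeps the single-address path from reading an indeterminate value (and quiets the compiler's maybe-uninitialized warning). The hazard in miniature:

    #include <stdio.h>

    static unsigned int parse(int have_range)
    {
        unsigned int ip = 1, ip_to = 0; /* the fix: initialize ip_to */

        if (have_range)
            ip_to = 42;        /* only the range path assigns it */

        return ip_to ? ip_to : ip; /* but both paths may read it */
    }

    int main(void)
    {
        printf("%u %u\n", parse(0), parse(1)); /* 1 42 */
        return 0;
    }
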
@@ -27,22 +27,17 @@

static DEFINE_MUTEX(nf_ct_ecache_mutex);

struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);

struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_expect_event_cb);

/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
unsigned long events;
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;

rcu_read_lock();
notify = rcu_dereference(nf_conntrack_event_cb);
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;

@@ -83,19 +78,20 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);

int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
int nf_conntrack_register_notifier(struct net *net,
struct nf_ct_event_notifier *new)
{
int ret = 0;
struct nf_ct_event_notifier *notify;

mutex_lock(&nf_ct_ecache_mutex);
notify = rcu_dereference_protected(nf_conntrack_event_cb,
notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
RCU_INIT_POINTER(nf_conntrack_event_cb, new);
RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;

@@ -105,32 +101,34 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *new)
{
struct nf_ct_event_notifier *notify;

mutex_lock(&nf_ct_ecache_mutex);
notify = rcu_dereference_protected(nf_conntrack_event_cb,
notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);

int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
int nf_ct_expect_register_notifier(struct net *net,
struct nf_exp_event_notifier *new)
{
int ret = 0;
struct nf_exp_event_notifier *notify;

mutex_lock(&nf_ct_ecache_mutex);
notify = rcu_dereference_protected(nf_expect_event_cb,
notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
RCU_INIT_POINTER(nf_expect_event_cb, new);
RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;

@@ -140,15 +138,16 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
void nf_ct_expect_unregister_notifier(struct net *net,
struct nf_exp_event_notifier *new)
{
struct nf_exp_event_notifier *notify;

mutex_lock(&nf_ct_ecache_mutex);
notify = rcu_dereference_protected(nf_expect_event_cb,
notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
RCU_INIT_POINTER(nf_expect_event_cb, NULL);
RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
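
Moving the notifier pointers into `net->ct` makes registration per network namespace, but the discipline is unchanged: a mutex guards the slot and a second registration is refused with -EBUSY. A userspace model of that register pattern (illustrative names, plain pointers in place of RCU):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct notifier { void (*fn)(void); };

    struct net {
        struct notifier *event_cb; /* one slot per namespace now */
    };

    static pthread_mutex_t ecache_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int register_notifier(struct net *net, struct notifier *new)
    {
        int ret = 0;

        pthread_mutex_lock(&ecache_mutex);
        if (net->event_cb != NULL)
            ret = -EBUSY;       /* somebody is already registered */
        else
            net->event_cb = new;
        pthread_mutex_unlock(&ecache_mutex);
        return ret;
    }

    int main(void)
    {
        struct net netns = { 0 };
        struct notifier a = { 0 }, b = { 0 };

        printf("%d\n", register_notifier(&netns, &a)); /* 0 */
        printf("%d\n", register_notifier(&netns, &b)); /* -16 (EBUSY) */
        return 0;
    }
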
@@ -4,7 +4,7 @@
* (C) 2001 by Jay Schulist <jschlst@samba.org>
* (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
* (C) 2003 by Patrick Mchardy <kaber@trash.net>
* (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
* (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Initial connection tracking via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)

@@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);

static int __net_init ctnetlink_net_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
int ret;

ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
if (ret < 0) {
pr_err("ctnetlink_init: cannot register notifier.\n");
goto err_out;
}

ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
if (ret < 0) {
pr_err("ctnetlink_init: cannot expect register notifier.\n");
goto err_unreg_notifier;
}
#endif
return 0;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
err_unreg_notifier:
nf_conntrack_unregister_notifier(net, &ctnl_notifier);
err_out:
return ret;
#endif
}

static void ctnetlink_net_exit(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
nf_conntrack_unregister_notifier(net, &ctnl_notifier);
#endif
}

static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
{
struct net *net;

list_for_each_entry(net, net_exit_list, exit_list)
ctnetlink_net_exit(net);
}

static struct pernet_operations ctnetlink_net_ops = {
.init = ctnetlink_net_init,
.exit_batch = ctnetlink_net_exit_batch,
};

static int __init ctnetlink_init(void)
{
int ret;

@@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys;
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
ret = nf_conntrack_register_notifier(&ctnl_notifier);
if (ret < 0) {
pr_err("ctnetlink_init: cannot register notifier.\n");
if (register_pernet_subsys(&ctnetlink_net_ops)) {
pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}

ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
if (ret < 0) {
pr_err("ctnetlink_init: cannot expect register notifier.\n");
goto err_unreg_notifier;
}
#endif

return 0;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
err_unreg_notifier:
nf_conntrack_unregister_notifier(&ctnl_notifier);
err_unreg_exp_subsys:
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
#endif
err_unreg_subsys:
nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:

@@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void)
pr_info("ctnetlink: unregistering from nfnetlink.\n");

nf_ct_remove_userspace_expectations();
#ifdef CONFIG_NF_CONNTRACK_EVENTS
nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
nf_conntrack_unregister_notifier(&ctnl_notifier);
#endif

unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
}
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr4_map *map4 = NULL;
struct netlbl_domaddr6_map *map6 = NULL;
const struct in_addr *addr4, *mask4;
const struct in6_addr *addr6, *mask6;

entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)

@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
INIT_LIST_HEAD(&addrmap->list6);

switch (family) {
case AF_INET:
addr4 = addr;
mask4 = mask;
case AF_INET: {
const struct in_addr *addr4 = addr;
const struct in_addr *mask4 = mask;
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;

@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
case AF_INET6:
addr6 = addr;
mask6 = mask;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6: {
const struct in6_addr *addr6 = addr;
const struct in6_addr *mask6 = mask;
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;

@@ -162,11 +162,13 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
ipv6_addr_copy(&map6->list.mask, mask6);
map6->list.valid = 1;
ret_val = netlbl_af4list_add(&map4->list,
&addrmap->list4);
ret_val = netlbl_af6list_add(&map6->list,
&addrmap->list6);
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
}
#endif /* IPv6 */
default:
goto cfg_unlbl_map_add_failure;
break;

@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}

@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
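
This rework repairs the IPv6-disabled build two ways at once: the per-family pointers become case-local, brace-scoped variables (so nothing IPv6-flavored leaks out of the switch), and the AF_INET6 arms compile only when IPv6 support is configured. The same shape as a standalone sketch, with HAVE_IPV6 standing in for the kernel's CONFIG test:

    #include <stdio.h>

    #define HAVE_IPV6 1 /* stand-in for CONFIG_IPV6 || CONFIG_IPV6_MODULE */

    static int addr_len(int family)
    {
        switch (family) {
        case 4: {
            const int len4 = 4;  /* case-local, lives only in this block */
            return len4;
        }
    #if HAVE_IPV6
        case 6: {
            const int len6 = 16; /* compiled out when IPv6 is disabled */
            return len6;
        }
    #endif
        default:
            return -1;           /* -EPFNOSUPPORT in the kernel code */
        }
    }

    int main(void)
    {
        printf("%d %d\n", addr_len(4), addr_len(6)); /* 4 16 */
        return 0;
    }
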
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
ctl->Plog, ctl->Scell_log,
nla_data(tb[TCA_RED_STAB]));

if (skb_queue_empty(&sch->q))
red_end_of_idle_period(&q->parms);
if (!q->qdisc->q.qlen)
red_start_of_idle_period(&q->parms);

sch_tree_unlock(sch);
return 0;
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)


static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
struct net_device *dev, struct netdev_queue *txq,
struct neighbour *mn)
{
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
struct teql_sched_data *q = qdisc_priv(txq->qdisc);
struct neighbour *n = q->ncache;

if (mn->tbl == NULL)

@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
}

static inline int teql_resolve(struct sk_buff *skb,
struct sk_buff *skb_res, struct net_device *dev)
struct sk_buff *skb_res,
struct net_device *dev,
struct netdev_queue *txq)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
struct dst_entry *dst = skb_dst(skb);
struct neighbour *mn;
int res;

if (txq->qdisc == &noop_qdisc)
return -ENODEV;

if (dev->header_ops == NULL ||
skb_dst(skb) == NULL ||
dst_get_neighbour(skb_dst(skb)) == NULL)
if (!dev->header_ops || !dst)
return 0;
return __teql_resolve(skb, skb_res, dev);

rcu_read_lock();
mn = dst_get_neighbour(dst);
res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
rcu_read_unlock();

return res;
}

static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)

@@ -307,7 +316,7 @@ restart:
continue;
}

switch (teql_resolve(skb, skb_res, slave)) {
switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
case 0:
if (__netif_tx_trylock(slave_txq)) {
unsigned int length = qdisc_pkt_len(skb);
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
struct sctp_auth_bytes *key;

/* Verify that we are not going to overflow INT_MAX */
if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
return NULL;

/* Allocate the shared key */
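
The reordered SCTP check matters because `key_len` is a `__u32`: for a huge `key_len`, the old `INT_MAX - key_len` is evaluated in unsigned arithmetic and wraps to a large value, so the `< sizeof(...)` test never fires; comparing `key_len` against the constant `INT_MAX - sizeof(...)` cannot wrap. A worked demonstration with a stand-in struct size:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HDR_SIZE ((size_t)24) /* stand-in for sizeof(struct sctp_auth_bytes) */

    int main(void)
    {
        uint32_t key_len = UINT32_MAX - 4; /* hostile, near-maximal length */

        /* Old check: INT_MAX - key_len wraps in unsigned math,
         * yielding a huge value that is never below HDR_SIZE. */
        int old_catches = (INT_MAX - key_len) < HDR_SIZE;

        /* New check: the subtraction involves only constants, no wrap. */
        int new_catches = key_len > (INT_MAX - HDR_SIZE);

        printf("old check catches overflow: %d\n", old_catches); /* 0 - missed */
        printf("new check catches overflow: %d\n", new_catches); /* 1 - caught */
        return 0;
    }
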
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if ((UNIXCB(skb).pid != siocb->scm->pid) ||
(UNIXCB(skb).cred != siocb->scm->cred)) {
skb_queue_head(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
break;
}
} else {

@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
if (copied == 0)
copied = -EFAULT;
break;

@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
/* put the skb back if we didn't use it up.. */
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
break;
}

@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,

/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
break;
}
} while (size);
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },

[NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
[NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
[NL80211_ATTR_MAC] = { .len = ETH_ALEN },
[NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },

[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
@@ -2037,6 +2037,10 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
}

request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (!request_wiphy) {
reg_set_request_processed();
return -ENODEV;
}

if (!last_request->intersect) {
int r;
@@ -2382,9 +2382,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
return dst_metric_advmss(dst->path);
}

static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
return dst_mtu(dst->path);
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

return mtu ? : dst_mtu(dst->path);
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)

@@ -2411,8 +2413,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
dst_ops->check = xfrm_dst_check;
if (likely(dst_ops->default_advmss == NULL))
dst_ops->default_advmss = xfrm_default_advmss;
if (likely(dst_ops->default_mtu == NULL))
dst_ops->default_mtu = xfrm_default_mtu;
if (likely(dst_ops->mtu == NULL))
dst_ops->mtu = xfrm_mtu;
if (likely(dst_ops->negative_advice == NULL))
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))