Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/marvell/mvneta.c

The mvneta.c conflict is a case of overlapping changes, a conversion
to devm_ioremap_resource() vs. a conversion to netdev_alloc_pcpu_stats.

Signed-off-by: David S. Miller <davem@davemloft.net>
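For context, a minimal sketch of the two overlapping conversions that collided; this is illustrative only, not the literal mvneta.c hunks, and the locals ('res', 'pp', 'base') are simplified placeholders for the driver's probe-time variables:

/* Sketch, assuming the usual kernel APIs; not the actual mvneta.c code. */

/* 'net' side: request_mem_region() + ioremap() folded into one devm_ call */
void __iomem *base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
	return PTR_ERR(base);

/* 'net-next' side: open-coded per-CPU stats setup replaced by the helper */
pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
if (!pp->stats)
	return -ENOMEM;

Both changes touch the same probe path, which is why the merge had to pick up each side's conversion by hand.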
commit 64c27237a0
@ -1839,8 +1839,8 @@ F: net/bluetooth/
|
|||
F: include/net/bluetooth/
|
||||
|
||||
BONDING DRIVER
|
||||
M: Jay Vosburgh <fubar@us.ibm.com>
|
||||
M: Veaceslav Falico <vfalico@redhat.com>
|
||||
M: Jay Vosburgh <j.vosburgh@gmail.com>
|
||||
M: Veaceslav Falico <vfalico@gmail.com>
|
||||
M: Andy Gospodarek <andy@greyhouse.net>
|
||||
L: netdev@vger.kernel.org
|
||||
W: http://sourceforge.net/projects/bonding/
|
||||
|
@ -6019,6 +6019,7 @@ F: include/uapi/linux/net.h
|
|||
F: include/uapi/linux/netdevice.h
|
||||
F: tools/net/
|
||||
F: tools/testing/selftests/net/
|
||||
F: lib/random32.c
|
||||
|
||||
NETWORKING [IPv4/IPv6]
|
||||
M: "David S. Miller" <davem@davemloft.net>
|
||||
|
|
|
@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
|
|||
return a.pte == b.pte;
|
||||
}
|
||||
|
||||
static inline int pteval_present(pteval_t pteval)
|
||||
{
|
||||
/*
|
||||
* Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
|
||||
* way clearly states that the intent is that protnone and numa
|
||||
* hinting ptes are considered present for the purposes of
|
||||
* pagetable operations like zapping, protection changes, gup etc.
|
||||
*/
|
||||
return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
|
||||
}
|
||||
|
||||
static inline int pte_present(pte_t a)
|
||||
{
|
||||
return pteval_present(pte_flags(a));
|
||||
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
|
||||
_PAGE_NUMA);
|
||||
}
|
||||
|
||||
#define pte_accessible pte_accessible
|
||||
|
|
|
@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
|
|||
|
||||
extern const struct cpumask *cpu_coregroup_mask(int cpu);
|
||||
|
||||
#ifdef ENABLE_TOPO_DEFINES
|
||||
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
|
||||
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
|
||||
|
||||
#ifdef ENABLE_TOPO_DEFINES
|
||||
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
|
||||
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
|
||||
#endif
|
||||
|
|
|
@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
|
|||
/* Assume pteval_t is equivalent to all the other *val_t types. */
|
||||
static pteval_t pte_mfn_to_pfn(pteval_t val)
|
||||
{
|
||||
if (pteval_present(val)) {
|
||||
if (val & _PAGE_PRESENT) {
|
||||
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
|
||||
unsigned long pfn = mfn_to_pfn(mfn);
|
||||
|
||||
|
@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
|
|||
|
||||
static pteval_t pte_pfn_to_mfn(pteval_t val)
|
||||
{
|
||||
if (pteval_present(val)) {
|
||||
if (val & _PAGE_PRESENT) {
|
||||
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
|
||||
pteval_t flags = val & PTE_FLAGS_MASK;
|
||||
unsigned long mfn;
|
||||
|
|
|
@ -842,7 +842,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
|
|||
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
|
||||
dev_priv->gtt.base.start / PAGE_SIZE,
|
||||
dev_priv->gtt.base.total / PAGE_SIZE,
|
||||
false);
|
||||
true);
|
||||
}
|
||||
|
||||
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
|
||||
|
|
|
@ -866,13 +866,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
|
|||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
int ret;
|
||||
|
||||
if (nouveau_runtime_pm == 0)
|
||||
return -EINVAL;
|
||||
if (nouveau_runtime_pm == 0) {
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* are we optimus enabled? */
|
||||
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
|
||||
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
|
||||
return -EINVAL;
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
nv_debug_level(SILENT);
|
||||
|
@ -923,12 +926,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
|
|||
struct nouveau_drm *drm = nouveau_drm(drm_dev);
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
if (nouveau_runtime_pm == 0)
|
||||
if (nouveau_runtime_pm == 0) {
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* are we optimus enabled? */
|
||||
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
|
||||
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
|
|
@ -403,11 +403,15 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
|
|||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
int ret;
|
||||
|
||||
if (radeon_runtime_pm == 0)
|
||||
return -EINVAL;
|
||||
if (radeon_runtime_pm == 0) {
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (radeon_runtime_pm == -1 && !radeon_is_px())
|
||||
return -EINVAL;
|
||||
if (radeon_runtime_pm == -1 && !radeon_is_px()) {
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
|
||||
drm_kms_helper_poll_disable(drm_dev);
|
||||
|
@ -456,12 +460,15 @@ static int radeon_pmops_runtime_idle(struct device *dev)
|
|||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
if (radeon_runtime_pm == 0)
|
||||
if (radeon_runtime_pm == 0) {
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* are we PX enabled? */
|
||||
if (radeon_runtime_pm == -1 && !radeon_is_px()) {
|
||||
DRM_DEBUG_DRIVER("failing to power off - not px\n");
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
|
|
@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
|
|||
if (obj->vmapping)
|
||||
udl_gem_vunmap(obj);
|
||||
|
||||
if (gem_obj->import_attach)
|
||||
if (gem_obj->import_attach) {
|
||||
drm_prime_gem_destroy(gem_obj, obj->sg);
|
||||
put_device(gem_obj->dev->dev);
|
||||
}
|
||||
|
||||
if (obj->pages)
|
||||
udl_gem_put_pages(obj);
|
||||
|
@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
|
|||
int ret;
|
||||
|
||||
/* need to attach */
|
||||
get_device(dev->dev);
|
||||
attach = dma_buf_attach(dma_buf, dev->dev);
|
||||
if (IS_ERR(attach))
|
||||
if (IS_ERR(attach)) {
|
||||
put_device(dev->dev);
|
||||
return ERR_CAST(attach);
|
||||
}
|
||||
|
||||
get_dma_buf(dma_buf);
|
||||
|
||||
|
@ -282,6 +287,6 @@ fail_unmap:
|
|||
fail_detach:
|
||||
dma_buf_detach(dma_buf, attach);
|
||||
dma_buf_put(dma_buf);
|
||||
|
||||
put_device(dev->dev);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
|
|
@ -39,7 +39,9 @@
|
|||
#include <linux/i2c.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <sysdev/fsl_soc.h>
|
||||
#include <asm/cpm.h>
|
||||
|
|
|
@ -76,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
|
|||
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
|
||||
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
|
||||
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
|
||||
int val;
|
||||
|
||||
return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
|
||||
mutex_lock(&kpad->gpio_lock);
|
||||
|
||||
if (kpad->dir[bank] & bit)
|
||||
val = kpad->dat_out[bank];
|
||||
else
|
||||
val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
|
||||
|
||||
mutex_unlock(&kpad->gpio_lock);
|
||||
|
||||
return !!(val & bit);
|
||||
}
|
||||
|
||||
static void adp5588_gpio_set_value(struct gpio_chip *chip,
|
||||
|
|
|
@ -27,29 +27,32 @@ struct da9052_onkey {
|
|||
|
||||
static void da9052_onkey_query(struct da9052_onkey *onkey)
|
||||
{
|
||||
int key_stat;
|
||||
int ret;
|
||||
|
||||
key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
|
||||
if (key_stat < 0) {
|
||||
ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
|
||||
if (ret < 0) {
|
||||
dev_err(onkey->da9052->dev,
|
||||
"Failed to read onkey event %d\n", key_stat);
|
||||
"Failed to read onkey event err=%d\n", ret);
|
||||
} else {
|
||||
/*
|
||||
* Since interrupt for deassertion of ONKEY pin is not
|
||||
* generated, onkey event state determines the onkey
|
||||
* button state.
|
||||
*/
|
||||
key_stat &= DA9052_EVENTB_ENONKEY;
|
||||
input_report_key(onkey->input, KEY_POWER, key_stat);
|
||||
input_sync(onkey->input);
|
||||
}
|
||||
bool pressed = !(ret & DA9052_STATUSA_NONKEY);
|
||||
|
||||
/*
|
||||
* Interrupt is generated only when the ONKEY pin is asserted.
|
||||
* Hence the deassertion of the pin is simulated through work queue.
|
||||
*/
|
||||
if (key_stat)
|
||||
schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
|
||||
input_report_key(onkey->input, KEY_POWER, pressed);
|
||||
input_sync(onkey->input);
|
||||
|
||||
/*
|
||||
* Interrupt is generated only when the ONKEY pin
|
||||
* is asserted. Hence the deassertion of the pin
|
||||
* is simulated through work queue.
|
||||
*/
|
||||
if (pressed)
|
||||
schedule_delayed_work(&onkey->work,
|
||||
msecs_to_jiffies(50));
|
||||
}
|
||||
}
|
||||
|
||||
static void da9052_onkey_work(struct work_struct *work)
|
||||
|
|
|
@ -409,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input,
|
|||
__clear_bit(REL_X, input->relbit);
|
||||
__clear_bit(REL_Y, input->relbit);
|
||||
|
||||
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
|
||||
__set_bit(EV_KEY, input->evbit);
|
||||
__set_bit(BTN_LEFT, input->keybit);
|
||||
__set_bit(BTN_RIGHT, input->keybit);
|
||||
|
|
|
@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
|
|||
* Read touchpad resolution and maximum reported coordinates
|
||||
* Resolution is left zero if touchpad does not support the query
|
||||
*/
|
||||
|
||||
static const int *quirk_min_max;
|
||||
|
||||
static int synaptics_resolution(struct psmouse *psmouse)
|
||||
{
|
||||
struct synaptics_data *priv = psmouse->private;
|
||||
unsigned char resp[3];
|
||||
|
||||
if (quirk_min_max) {
|
||||
priv->x_min = quirk_min_max[0];
|
||||
priv->x_max = quirk_min_max[1];
|
||||
priv->y_min = quirk_min_max[2];
|
||||
priv->y_max = quirk_min_max[3];
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (SYN_ID_MAJOR(priv->identity) < 4)
|
||||
return 0;
|
||||
|
||||
|
@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
|
|||
{ }
|
||||
};
|
||||
|
||||
static const struct dmi_system_id min_max_dmi_table[] __initconst = {
|
||||
#if defined(CONFIG_DMI)
|
||||
{
|
||||
/* Lenovo ThinkPad Helix */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
|
||||
},
|
||||
.driver_data = (int []){1024, 5052, 2258, 4832},
|
||||
},
|
||||
{
|
||||
/* Lenovo ThinkPad X240 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
|
||||
},
|
||||
.driver_data = (int []){1232, 5710, 1156, 4696},
|
||||
},
|
||||
{
|
||||
/* Lenovo ThinkPad T440s */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
|
||||
},
|
||||
.driver_data = (int []){1024, 5112, 2024, 4832},
|
||||
},
|
||||
{
|
||||
/* Lenovo ThinkPad T540p */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
|
||||
},
|
||||
.driver_data = (int []){1024, 5056, 2058, 4832},
|
||||
},
|
||||
#endif
|
||||
{ }
|
||||
};
|
||||
|
||||
void __init synaptics_module_init(void)
|
||||
{
|
||||
const struct dmi_system_id *min_max_dmi;
|
||||
|
||||
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
|
||||
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
|
||||
|
||||
min_max_dmi = dmi_first_match(min_max_dmi_table);
|
||||
if (min_max_dmi)
|
||||
quirk_min_max = min_max_dmi->driver_data;
|
||||
}
|
||||
|
||||
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
|
||||
|
|
|
@ -17641,8 +17641,6 @@ static int tg3_init_one(struct pci_dev *pdev,
|
|||
|
||||
tg3_init_bufmgr_config(tp);
|
||||
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
/* 5700 B0 chips do not support checksumming correctly due
|
||||
* to hardware bugs.
|
||||
*/
|
||||
|
@ -17674,7 +17672,8 @@ static int tg3_init_one(struct pci_dev *pdev,
|
|||
features |= NETIF_F_TSO_ECN;
|
||||
}
|
||||
|
||||
dev->features |= features;
|
||||
dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
dev->vlan_features |= features;
|
||||
|
||||
/*
|
||||
|
|
|
@ -89,8 +89,9 @@
|
|||
#define MVNETA_TX_IN_PRGRS BIT(1)
|
||||
#define MVNETA_TX_FIFO_EMPTY BIT(8)
|
||||
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
|
||||
#define MVNETA_SGMII_SERDES_CFG 0x24A0
|
||||
#define MVNETA_SERDES_CFG 0x24A0
|
||||
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
|
||||
#define MVNETA_RGMII_SERDES_PROTO 0x0667
|
||||
#define MVNETA_TYPE_PRIO 0x24bc
|
||||
#define MVNETA_FORCE_UNI BIT(21)
|
||||
#define MVNETA_TXQ_CMD_1 0x24e4
|
||||
|
@ -162,7 +163,7 @@
|
|||
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
|
||||
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
|
||||
#define MVNETA_GMAC_CTRL_2 0x2c08
|
||||
#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
|
||||
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
|
||||
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
|
||||
#define MVNETA_GMAC2_PORT_RESET BIT(6)
|
||||
#define MVNETA_GMAC_STATUS 0x2c10
|
||||
|
@ -711,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
|
|||
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
|
||||
static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
|
||||
|
||||
if (enable)
|
||||
val |= MVNETA_GMAC2_PORT_RGMII;
|
||||
else
|
||||
val &= ~MVNETA_GMAC2_PORT_RGMII;
|
||||
|
||||
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
|
||||
}
|
||||
|
||||
/* Config SGMII port */
|
||||
static void mvneta_port_sgmii_config(struct mvneta_port *pp)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
|
||||
val |= MVNETA_GMAC2_PSC_ENABLE;
|
||||
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
|
||||
|
||||
mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
|
||||
}
|
||||
|
||||
/* Start the Ethernet port RX and TX activity */
|
||||
static void mvneta_port_up(struct mvneta_port *pp)
|
||||
{
|
||||
|
@ -2757,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
|
|||
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
|
||||
|
||||
if (phy_mode == PHY_INTERFACE_MODE_SGMII)
|
||||
mvneta_port_sgmii_config(pp);
|
||||
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
|
||||
else
|
||||
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
|
||||
|
||||
mvneta_gmac_rgmii_set(pp, 1);
|
||||
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
|
||||
|
||||
val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
|
||||
|
||||
/* Cancel Port Reset */
|
||||
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
|
||||
val &= ~MVNETA_GMAC2_PORT_RESET;
|
||||
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
|
||||
|
||||
|
|
|
@ -2755,7 +2755,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
|
|||
|
||||
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
|
||||
{
|
||||
int ret = __mlx4_init_one(pdev, 0);
|
||||
const struct pci_device_id *id;
|
||||
int ret;
|
||||
|
||||
id = pci_match_id(mlx4_pci_table, pdev);
|
||||
ret = __mlx4_init_one(pdev, id->driver_data);
|
||||
|
||||
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
|
|
@ -4756,7 +4756,9 @@ static int qlge_probe(struct pci_dev *pdev,
|
|||
ndev->features = ndev->hw_features;
|
||||
ndev->vlan_features = ndev->hw_features;
|
||||
/* vlan gets same features (except vlan filter) */
|
||||
ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX);
|
||||
|
||||
if (test_bit(QL_DMA64, &qdev->flags))
|
||||
ndev->features |= NETIF_F_HIGHDMA;
|
||||
|
|
|
@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev)
|
|||
dev->tx_queue_len = TX_Q_LIMIT;
|
||||
|
||||
dev->features |= IFB_FEATURES;
|
||||
dev->vlan_features |= IFB_FEATURES;
|
||||
dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_STAG_TX);
|
||||
|
||||
dev->flags |= IFF_NOARP;
|
||||
dev->flags &= ~IFF_MULTICAST;
|
||||
|
|
|
@ -752,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
|
|||
// precondition: never called in_interrupt
|
||||
static void usbnet_terminate_urbs(struct usbnet *dev)
|
||||
{
|
||||
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
int temp;
|
||||
|
||||
/* ensure there are no more active urbs */
|
||||
add_wait_queue(&unlink_wakeup, &wait);
|
||||
add_wait_queue(&dev->wait, &wait);
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
dev->wait = &unlink_wakeup;
|
||||
temp = unlink_urbs(dev, &dev->txq) +
|
||||
unlink_urbs(dev, &dev->rxq);
|
||||
|
||||
|
@ -773,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
|
|||
"waited for %d urb completions\n", temp);
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
dev->wait = NULL;
|
||||
remove_wait_queue(&unlink_wakeup, &wait);
|
||||
remove_wait_queue(&dev->wait, &wait);
|
||||
}
|
||||
|
||||
int usbnet_stop (struct net_device *net)
|
||||
{
|
||||
struct usbnet *dev = netdev_priv(net);
|
||||
struct driver_info *info = dev->driver_info;
|
||||
int retval;
|
||||
int retval, pm;
|
||||
|
||||
clear_bit(EVENT_DEV_OPEN, &dev->flags);
|
||||
netif_stop_queue (net);
|
||||
|
@ -791,6 +788,8 @@ int usbnet_stop (struct net_device *net)
|
|||
net->stats.rx_packets, net->stats.tx_packets,
|
||||
net->stats.rx_errors, net->stats.tx_errors);
|
||||
|
||||
/* to not race resume */
|
||||
pm = usb_autopm_get_interface(dev->intf);
|
||||
/* allow minidriver to stop correctly (wireless devices to turn off
|
||||
* radio etc) */
|
||||
if (info->stop) {
|
||||
|
@ -817,6 +816,9 @@ int usbnet_stop (struct net_device *net)
|
|||
dev->flags = 0;
|
||||
del_timer_sync (&dev->delay);
|
||||
tasklet_kill (&dev->bh);
|
||||
if (!pm)
|
||||
usb_autopm_put_interface(dev->intf);
|
||||
|
||||
if (info->manage_power &&
|
||||
!test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
|
||||
info->manage_power(dev, 0);
|
||||
|
@ -1437,11 +1439,12 @@ static void usbnet_bh (unsigned long param)
|
|||
/* restart RX again after disabling due to high error rate */
|
||||
clear_bit(EVENT_RX_KILL, &dev->flags);
|
||||
|
||||
// waiting for all pending urbs to complete?
|
||||
if (dev->wait) {
|
||||
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
|
||||
wake_up (dev->wait);
|
||||
}
|
||||
/* waiting for all pending urbs to complete?
|
||||
* only then can we forgo submitting anew
|
||||
*/
|
||||
if (waitqueue_active(&dev->wait)) {
|
||||
if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
|
||||
wake_up_all(&dev->wait);
|
||||
|
||||
// or are we maybe short a few urbs?
|
||||
} else if (netif_running (dev->net) &&
|
||||
|
@ -1580,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
|
|||
dev->driver_name = name;
|
||||
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
|
||||
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
|
||||
init_waitqueue_head(&dev->wait);
|
||||
skb_queue_head_init (&dev->rxq);
|
||||
skb_queue_head_init (&dev->txq);
|
||||
skb_queue_head_init (&dev->done);
|
||||
|
@ -1791,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf)
|
|||
spin_unlock_irq(&dev->txq.lock);
|
||||
|
||||
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
|
||||
/* handle remote wakeup ASAP */
|
||||
if (!dev->wait &&
|
||||
netif_device_present(dev->net) &&
|
||||
/* handle remote wakeup ASAP
|
||||
* we cannot race against stop
|
||||
*/
|
||||
if (netif_device_present(dev->net) &&
|
||||
!timer_pending(&dev->delay) &&
|
||||
!test_bit(EVENT_RX_HALT, &dev->flags))
|
||||
rx_alloc_submit(dev, GFP_NOIO);
|
||||
|
|
|
@ -278,7 +278,10 @@ static void veth_setup(struct net_device *dev)
|
|||
dev->features |= NETIF_F_LLTX;
|
||||
dev->features |= VETH_FEATURES;
|
||||
dev->vlan_features = dev->features &
|
||||
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
|
||||
~(NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_STAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_STAG_RX);
|
||||
dev->destructor = veth_dev_free;
|
||||
|
||||
dev->hw_features = VETH_FEATURES;
|
||||
|
|
|
@ -671,8 +671,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
|
|||
if (err)
|
||||
break;
|
||||
} while (rq->vq->num_free);
|
||||
if (unlikely(!virtqueue_kick(rq->vq)))
|
||||
return false;
|
||||
virtqueue_kick(rq->vq);
|
||||
return !oom;
|
||||
}
|
||||
|
||||
|
@ -877,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
err = xmit_skb(sq, skb);
|
||||
|
||||
/* This should not happen! */
|
||||
if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
|
||||
if (unlikely(err)) {
|
||||
dev->stats.tx_fifo_errors++;
|
||||
if (net_ratelimit())
|
||||
dev_warn(&dev->dev,
|
||||
|
@ -886,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
dev_kfree_skb_any(skb);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
virtqueue_kick(sq->vq);
|
||||
|
||||
/* Don't wait up for transmitted skbs to be freed. */
|
||||
skb_orphan(skb);
|
||||
|
|
|
@ -505,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
|
|||
r = -ENOBUFS;
|
||||
goto err;
|
||||
}
|
||||
d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
|
||||
r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
|
||||
ARRAY_SIZE(vq->iov) - seg, &out,
|
||||
&in, log, log_num);
|
||||
if (unlikely(r < 0))
|
||||
goto err;
|
||||
|
||||
d = r;
|
||||
if (d == vq->num) {
|
||||
r = 0;
|
||||
goto err;
|
||||
|
@ -532,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
|
|||
*iovcount = seg;
|
||||
if (unlikely(log))
|
||||
*log_num = nlogs;
|
||||
|
||||
/* Detect overrun */
|
||||
if (unlikely(datalen > 0)) {
|
||||
r = UIO_MAXIOV + 1;
|
||||
goto err;
|
||||
}
|
||||
return headcount;
|
||||
err:
|
||||
vhost_discard_vq_desc(vq, headcount);
|
||||
|
@ -587,6 +597,14 @@ static void handle_rx(struct vhost_net *net)
|
|||
/* On error, stop handling until the next kick. */
|
||||
if (unlikely(headcount < 0))
|
||||
break;
|
||||
/* On overrun, truncate and discard */
|
||||
if (unlikely(headcount > UIO_MAXIOV)) {
|
||||
msg.msg_iovlen = 1;
|
||||
err = sock->ops->recvmsg(NULL, sock, &msg,
|
||||
1, MSG_DONTWAIT | MSG_TRUNC);
|
||||
pr_debug("Discarded rx packet: len %zd\n", sock_len);
|
||||
continue;
|
||||
}
|
||||
/* OK, now we need to know about added descriptors. */
|
||||
if (!headcount) {
|
||||
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
|
||||
|
|
|
@ -399,12 +399,26 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
|
|||
state = BP_EAGAIN;
|
||||
break;
|
||||
}
|
||||
|
||||
pfn = page_to_pfn(page);
|
||||
frame_list[i] = pfn_to_mfn(pfn);
|
||||
|
||||
scrub_page(page);
|
||||
|
||||
frame_list[i] = page_to_pfn(page);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that ballooned highmem pages don't have kmaps.
|
||||
*
|
||||
* Do this before changing the p2m as kmap_flush_unused()
|
||||
* reads PTEs to obtain pages (and hence needs the original
|
||||
* p2m entry).
|
||||
*/
|
||||
kmap_flush_unused();
|
||||
|
||||
/* Update direct mapping, invalidate P2M, and add to balloon. */
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
pfn = frame_list[i];
|
||||
frame_list[i] = pfn_to_mfn(pfn);
|
||||
page = pfn_to_page(pfn);
|
||||
|
||||
#ifdef CONFIG_XEN_HAVE_PVMMU
|
||||
/*
|
||||
* Ballooned out frames are effectively replaced with
|
||||
|
@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
|
|||
}
|
||||
#endif
|
||||
|
||||
balloon_append(pfn_to_page(pfn));
|
||||
balloon_append(page);
|
||||
}
|
||||
|
||||
/* Ensure that ballooned highmem pages don't have kmaps. */
|
||||
kmap_flush_unused();
|
||||
flush_tlb_all();
|
||||
|
||||
set_xen_guest_handle(reservation.extent_start, frame_list);
|
||||
|
|
|
@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
|
|||
static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
|
||||
int flags, const char *dev_name, void *data)
|
||||
{
|
||||
struct dentry *root;
|
||||
root = mount_pseudo(fs_type, "anon_inode:", NULL,
|
||||
return mount_pseudo(fs_type, "anon_inode:", NULL,
|
||||
&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
|
||||
if (!IS_ERR(root)) {
|
||||
struct super_block *s = root->d_sb;
|
||||
anon_inode_inode = alloc_anon_inode(s);
|
||||
if (IS_ERR(anon_inode_inode)) {
|
||||
dput(root);
|
||||
deactivate_locked_super(s);
|
||||
root = ERR_CAST(anon_inode_inode);
|
||||
}
|
||||
}
|
||||
return root;
|
||||
}
|
||||
|
||||
static struct file_system_type anon_inode_fs_type = {
|
||||
|
@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
|
|||
|
||||
static int __init anon_inode_init(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = register_filesystem(&anon_inode_fs_type);
|
||||
if (error)
|
||||
goto err_exit;
|
||||
anon_inode_mnt = kern_mount(&anon_inode_fs_type);
|
||||
if (IS_ERR(anon_inode_mnt)) {
|
||||
error = PTR_ERR(anon_inode_mnt);
|
||||
goto err_unregister_filesystem;
|
||||
}
|
||||
return 0;
|
||||
if (IS_ERR(anon_inode_mnt))
|
||||
panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
|
||||
|
||||
err_unregister_filesystem:
|
||||
unregister_filesystem(&anon_inode_fs_type);
|
||||
err_exit:
|
||||
panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
|
||||
anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
|
||||
if (IS_ERR(anon_inode_inode))
|
||||
panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
fs_initcall(anon_inode_init);
|
||||
|
|
|
@ -346,7 +346,9 @@ int ocfs2_cluster_connect(const char *stack_name,
|
|||
|
||||
strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
|
||||
new_conn->cc_namelen = grouplen;
|
||||
strlcpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1);
|
||||
if (cluster_name_len)
|
||||
strlcpy(new_conn->cc_cluster_name, cluster_name,
|
||||
CLUSTER_NAME_MAX + 1);
|
||||
new_conn->cc_cluster_name_len = cluster_name_len;
|
||||
new_conn->cc_recovery_handler = recovery_handler;
|
||||
new_conn->cc_recovery_data = recovery_data;
|
||||
|
|
|
@ -163,4 +163,11 @@ enum {
|
|||
/* changeable features with no special hardware requirements */
|
||||
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
|
||||
|
||||
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
|
||||
NETIF_F_HW_VLAN_CTAG_RX | \
|
||||
NETIF_F_HW_VLAN_CTAG_TX | \
|
||||
NETIF_F_HW_VLAN_STAG_FILTER | \
|
||||
NETIF_F_HW_VLAN_STAG_RX | \
|
||||
NETIF_F_HW_VLAN_STAG_TX)
|
||||
|
||||
#endif /* _LINUX_NETDEV_FEATURES_H */
|
||||
|
|
|
@ -3097,7 +3097,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
|
|||
{
|
||||
return __skb_gso_segment(skb, features, true);
|
||||
}
|
||||
__be16 skb_network_protocol(struct sk_buff *skb);
|
||||
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
|
||||
|
||||
static inline bool can_checksum_protocol(netdev_features_t features,
|
||||
__be16 protocol)
|
||||
|
|
|
@ -2508,8 +2508,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
|
|||
unsigned int flags);
|
||||
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
|
||||
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
|
||||
void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
|
||||
int len, int hlen);
|
||||
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
|
||||
int len, int hlen);
|
||||
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
|
||||
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
|
||||
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
|
||||
|
|
|
@ -30,7 +30,7 @@ struct usbnet {
|
|||
struct driver_info *driver_info;
|
||||
const char *driver_name;
|
||||
void *driver_priv;
|
||||
wait_queue_head_t *wait;
|
||||
wait_queue_head_t wait;
|
||||
struct mutex phy_mutex;
|
||||
unsigned char suspend_count;
|
||||
unsigned char pkt_cnt, pkt_err;
|
||||
|
|
|
@ -31,8 +31,10 @@
|
|||
#define IF_PREFIX_AUTOCONF 0x02
|
||||
|
||||
enum {
|
||||
INET6_IFADDR_STATE_PREDAD,
|
||||
INET6_IFADDR_STATE_DAD,
|
||||
INET6_IFADDR_STATE_POSTDAD,
|
||||
INET6_IFADDR_STATE_ERRDAD,
|
||||
INET6_IFADDR_STATE_UP,
|
||||
INET6_IFADDR_STATE_DEAD,
|
||||
};
|
||||
|
@ -58,7 +60,7 @@ struct inet6_ifaddr {
|
|||
unsigned long cstamp; /* created timestamp */
|
||||
unsigned long tstamp; /* updated timestamp */
|
||||
|
||||
struct timer_list dad_timer;
|
||||
struct delayed_work dad_work;
|
||||
|
||||
struct inet6_dev *idev;
|
||||
struct rt6_info *rt;
|
||||
|
|
|
@ -1600,15 +1600,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
|
||||
|
||||
static struct ring_buffer *temp_buffer;
|
||||
|
||||
struct ring_buffer_event *
|
||||
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
|
||||
struct ftrace_event_file *ftrace_file,
|
||||
int type, unsigned long len,
|
||||
unsigned long flags, int pc)
|
||||
{
|
||||
struct ring_buffer_event *entry;
|
||||
|
||||
*current_rb = ftrace_file->tr->trace_buffer.buffer;
|
||||
return trace_buffer_lock_reserve(*current_rb,
|
||||
entry = trace_buffer_lock_reserve(*current_rb,
|
||||
type, len, flags, pc);
|
||||
/*
|
||||
* If tracing is off, but we have triggers enabled
|
||||
* we still need to look at the event data. Use the temp_buffer
|
||||
* to store the trace event for the tigger to use. It's recusive
|
||||
* safe and will not be recorded anywhere.
|
||||
*/
|
||||
if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
|
||||
*current_rb = temp_buffer;
|
||||
entry = trace_buffer_lock_reserve(*current_rb,
|
||||
type, len, flags, pc);
|
||||
}
|
||||
return entry;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
|
||||
|
||||
|
@ -6494,11 +6510,16 @@ __init static int tracer_alloc_buffers(void)
|
|||
|
||||
raw_spin_lock_init(&global_trace.start_lock);
|
||||
|
||||
/* Used for event triggers */
|
||||
temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
|
||||
if (!temp_buffer)
|
||||
goto out_free_cpumask;
|
||||
|
||||
/* TODO: make the number of buffers hot pluggable with CPUS */
|
||||
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
|
||||
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
|
||||
WARN_ON(1);
|
||||
goto out_free_cpumask;
|
||||
goto out_free_temp_buffer;
|
||||
}
|
||||
|
||||
if (global_trace.buffer_disabled)
|
||||
|
@ -6540,6 +6561,8 @@ __init static int tracer_alloc_buffers(void)
|
|||
|
||||
return 0;
|
||||
|
||||
out_free_temp_buffer:
|
||||
ring_buffer_free(temp_buffer);
|
||||
out_free_cpumask:
|
||||
free_percpu(global_trace.trace_buffer.data);
|
||||
#ifdef CONFIG_TRACER_MAX_TRACE
|
||||
|
|
|
@ -244,8 +244,19 @@ static void __prandom_reseed(bool late)
|
|||
static bool latch = false;
|
||||
static DEFINE_SPINLOCK(lock);
|
||||
|
||||
/* Asking for random bytes might result in bytes getting
|
||||
* moved into the nonblocking pool and thus marking it
|
||||
* as initialized. In this case we would double back into
|
||||
* this function and attempt to do a late reseed.
|
||||
* Ignore the pointless attempt to reseed again if we're
|
||||
* already waiting for bytes when the nonblocking pool
|
||||
* got initialized.
|
||||
*/
|
||||
|
||||
/* only allow initial seeding (late == false) once */
|
||||
spin_lock_irqsave(&lock, flags);
|
||||
if (!spin_trylock_irqsave(&lock, flags))
|
||||
return;
|
||||
|
||||
if (latch && !late)
|
||||
goto out;
|
||||
latch = true;
|
||||
|
|
|
@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
|
|||
static void vlan_transfer_features(struct net_device *dev,
|
||||
struct net_device *vlandev)
|
||||
{
|
||||
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
|
||||
|
||||
vlandev->gso_max_size = dev->gso_max_size;
|
||||
|
||||
if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
|
||||
if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
|
||||
vlandev->hard_header_len = dev->hard_header_len;
|
||||
else
|
||||
vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
|
||||
|
|
|
@ -578,6 +578,9 @@ static int vlan_dev_init(struct net_device *dev)
|
|||
|
||||
dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
|
||||
dev->gso_max_size = real_dev->gso_max_size;
|
||||
if (dev->features & NETIF_F_VLAN_FEATURES)
|
||||
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
|
||||
|
||||
|
||||
/* ipv6 shared card related stuff */
|
||||
dev->dev_id = real_dev->dev_id;
|
||||
|
@ -592,7 +595,8 @@ static int vlan_dev_init(struct net_device *dev)
|
|||
#endif
|
||||
|
||||
dev->needed_headroom = real_dev->needed_headroom;
|
||||
if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
|
||||
if (vlan_hw_offload_capable(real_dev->features,
|
||||
vlan_dev_priv(dev)->vlan_proto)) {
|
||||
dev->header_ops = &vlan_passthru_header_ops;
|
||||
dev->hard_header_len = real_dev->hard_header_len;
|
||||
} else {
|
||||
|
|
|
@ -49,14 +49,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
brstats->tx_bytes += skb->len;
|
||||
u64_stats_update_end(&brstats->syncp);
|
||||
|
||||
if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
|
||||
goto out;
|
||||
|
||||
BR_INPUT_SKB_CB(skb)->brdev = dev;
|
||||
|
||||
skb_reset_mac_header(skb);
|
||||
skb_pull(skb, ETH_HLEN);
|
||||
|
||||
if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
|
||||
goto out;
|
||||
|
||||
if (is_broadcast_ether_addr(dest))
|
||||
br_flood_deliver(br, skb, false);
|
||||
else if (is_multicast_ether_addr(dest)) {
|
||||
|
|
|
@ -29,6 +29,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
|
|||
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
|
||||
struct net_bridge *br = netdev_priv(brdev);
|
||||
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
|
||||
struct net_port_vlans *pv;
|
||||
|
||||
u64_stats_update_begin(&brstats->syncp);
|
||||
brstats->rx_packets++;
|
||||
|
@ -39,18 +40,18 @@ static int br_pass_frame_up(struct sk_buff *skb)
|
|||
* packet is allowed except in promisc modue when someone
|
||||
* may be running packet capture.
|
||||
*/
|
||||
pv = br_get_vlan_info(br);
|
||||
if (!(brdev->flags & IFF_PROMISC) &&
|
||||
!br_allowed_egress(br, br_get_vlan_info(br), skb)) {
|
||||
!br_allowed_egress(br, pv, skb)) {
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
||||
skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
|
||||
if (!skb)
|
||||
return NET_RX_DROP;
|
||||
|
||||
indev = skb->dev;
|
||||
skb->dev = brdev;
|
||||
skb = br_handle_vlan(br, pv, skb);
|
||||
if (!skb)
|
||||
return NET_RX_DROP;
|
||||
|
||||
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
|
||||
netif_receive_skb);
|
||||
|
|
|
@ -119,22 +119,6 @@ static void __vlan_flush(struct net_port_vlans *v)
|
|||
kfree_rcu(v, rcu);
|
||||
}
|
||||
|
||||
/* Strip the tag from the packet. Will return skb with tci set 0. */
|
||||
static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
|
||||
{
|
||||
if (skb->protocol != htons(ETH_P_8021Q)) {
|
||||
skb->vlan_tci = 0;
|
||||
return skb;
|
||||
}
|
||||
|
||||
skb->vlan_tci = 0;
|
||||
skb = vlan_untag(skb);
|
||||
if (skb)
|
||||
skb->vlan_tci = 0;
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
struct sk_buff *br_handle_vlan(struct net_bridge *br,
|
||||
const struct net_port_vlans *pv,
|
||||
struct sk_buff *skb)
|
||||
|
@ -144,13 +128,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
|
|||
if (!br->vlan_enabled)
|
||||
goto out;
|
||||
|
||||
/* Vlan filter table must be configured at this point. The
|
||||
* only exception is the bridge is set in promisc mode and the
|
||||
* packet is destined for the bridge device. In this case
|
||||
* pass the packet as is.
|
||||
*/
|
||||
if (!pv) {
|
||||
if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
|
||||
goto out;
|
||||
} else {
|
||||
kfree_skb(skb);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* At this point, we know that the frame was filtered and contains
|
||||
* a valid vlan id. If the vlan id is set in the untagged bitmap,
|
||||
* send untagged; otherwise, send tagged.
|
||||
*/
|
||||
br_vlan_get_tag(skb, &vid);
|
||||
if (test_bit(vid, pv->untagged_bitmap))
|
||||
skb = br_vlan_untag(skb);
|
||||
skb->vlan_tci = 0;
|
||||
|
||||
out:
|
||||
return skb;
|
||||
|
@ -174,6 +172,18 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
|
|||
if (!v)
|
||||
return false;
|
||||
|
||||
/* If vlan tx offload is disabled on bridge device and frame was
|
||||
* sent from vlan device on the bridge device, it does not have
|
||||
* HW accelerated vlan tag.
|
||||
*/
|
||||
if (unlikely(!vlan_tx_tag_present(skb) &&
|
||||
(skb->protocol == htons(ETH_P_8021Q) ||
|
||||
skb->protocol == htons(ETH_P_8021AD)))) {
|
||||
skb = vlan_untag(skb);
|
||||
if (unlikely(!skb))
|
||||
return false;
|
||||
}
|
||||
|
||||
err = br_vlan_get_tag(skb, vid);
|
||||
if (!*vid) {
|
||||
u16 pvid = br_get_pvid(v);
|
||||
|
|
|
@ -2281,7 +2281,7 @@ out:
|
|||
}
|
||||
EXPORT_SYMBOL(skb_checksum_help);
|
||||
|
||||
__be16 skb_network_protocol(struct sk_buff *skb)
|
||||
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
|
||||
{
|
||||
__be16 type = skb->protocol;
|
||||
int vlan_depth = ETH_HLEN;
|
||||
|
@ -2308,6 +2308,8 @@ __be16 skb_network_protocol(struct sk_buff *skb)
|
|||
vlan_depth += VLAN_HLEN;
|
||||
}
|
||||
|
||||
*depth = vlan_depth;
|
||||
|
||||
return type;
|
||||
}
|
||||
|
||||
|
@ -2321,12 +2323,13 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
|
|||
{
|
||||
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
|
||||
struct packet_offload *ptype;
|
||||
__be16 type = skb_network_protocol(skb);
|
||||
int vlan_depth = skb->mac_len;
|
||||
__be16 type = skb_network_protocol(skb, &vlan_depth);
|
||||
|
||||
if (unlikely(!type))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
__skb_pull(skb, skb->mac_len);
|
||||
__skb_pull(skb, vlan_depth);
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(ptype, &offload_base, list) {
|
||||
|
@ -2493,8 +2496,10 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
|
|||
const struct net_device *dev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
int tmp;
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_NONE &&
|
||||
!can_checksum_protocol(features, skb_network_protocol(skb))) {
|
||||
!can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
|
||||
features &= ~NETIF_F_ALL_CSUM;
|
||||
} else if (illegal_highdma(dev, skb)) {
|
||||
features &= ~NETIF_F_SG;
|
||||
|
|
|
@ -2127,25 +2127,31 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
|
|||
*
|
||||
* The `hlen` as calculated by skb_zerocopy_headlen() specifies the
|
||||
* headroom in the `to` buffer.
|
||||
*
|
||||
* Return value:
|
||||
* 0: everything is OK
|
||||
* -ENOMEM: couldn't orphan frags of @from due to lack of memory
|
||||
* -EFAULT: skb_copy_bits() found some problem with skb geometry
|
||||
*/
|
||||
void
|
||||
skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
|
||||
int
|
||||
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
|
||||
{
|
||||
int i, j = 0;
|
||||
int plen = 0; /* length of skb->head fragment */
|
||||
int ret;
|
||||
struct page *page;
|
||||
unsigned int offset;
|
||||
|
||||
BUG_ON(!from->head_frag && !hlen);
|
||||
|
||||
/* dont bother with small payloads */
|
||||
if (len <= skb_tailroom(to)) {
|
||||
skb_copy_bits(from, 0, skb_put(to, len), len);
|
||||
return;
|
||||
}
|
||||
if (len <= skb_tailroom(to))
|
||||
return skb_copy_bits(from, 0, skb_put(to, len), len);
|
||||
|
||||
if (hlen) {
|
||||
skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
|
||||
ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
len -= hlen;
|
||||
} else {
|
||||
plen = min_t(int, skb_headlen(from), len);
|
||||
|
@ -2163,6 +2169,11 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
|
|||
to->len += len + plen;
|
||||
to->data_len += len + plen;
|
||||
|
||||
if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
|
||||
skb_tx_error(from);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
|
||||
if (!len)
|
||||
break;
|
||||
|
@ -2173,6 +2184,8 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
|
|||
j++;
|
||||
}
|
||||
skb_shinfo(to)->nr_frags = j;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(skb_zerocopy);
|
||||
|
||||
|
@ -2866,8 +2879,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
|
|||
int err = -ENOMEM;
|
||||
int i = 0;
|
||||
int pos;
|
||||
int dummy;
|
||||
|
||||
proto = skb_network_protocol(head_skb);
|
||||
proto = skb_network_protocol(head_skb, &dummy);
|
||||
if (unlikely(!proto))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
|
|
|
@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
|
|||
int i;
|
||||
bool csum_err = false;
|
||||
|
||||
#ifdef CONFIG_NET_IPGRE_BROADCAST
|
||||
if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
|
||||
/* Looped back packet, drop it! */
|
||||
if (rt_is_output_route(skb_rtable(skb)))
|
||||
goto drop;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (parse_gre_header(skb, &tpi, &csum_err) < 0)
|
||||
goto drop;
|
||||
|
||||
|
|
|
@ -420,9 +420,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
|
|||
|
||||
#ifdef CONFIG_NET_IPGRE_BROADCAST
|
||||
if (ipv4_is_multicast(iph->daddr)) {
|
||||
/* Looped back packet, drop it! */
|
||||
if (rt_is_output_route(skb_rtable(skb)))
|
||||
goto drop;
|
||||
tunnel->dev->stats.multicast++;
|
||||
skb->pkt_type = PACKET_BROADCAST;
|
||||
}
|
||||
|
|
|
@ -108,6 +108,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
|
|||
nf_reset(skb);
|
||||
secpath_reset(skb);
|
||||
skb_clear_hash_if_not_l4(skb);
|
||||
skb_dst_drop(skb);
|
||||
skb->vlan_tci = 0;
|
||||
skb_set_queue_mapping(skb, 0);
|
||||
skb->pkt_type = PACKET_HOST;
|
||||
|
|
|
@ -2628,7 +2628,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
|
|||
{
|
||||
__be32 dest, src;
|
||||
__u16 destp, srcp;
|
||||
long delta = tw->tw_ttd - jiffies;
|
||||
s32 delta = tw->tw_ttd - inet_tw_time_stamp();
|
||||
|
||||
dest = tw->tw_daddr;
|
||||
src = tw->tw_rcv_saddr;
|
||||
|
|
|
@ -133,10 +133,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
|
|||
static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
|
||||
static DEFINE_SPINLOCK(addrconf_hash_lock);
|
||||
|
||||
static void addrconf_verify(unsigned long);
|
||||
static void addrconf_verify(void);
|
||||
static void addrconf_verify_rtnl(void);
|
||||
static void addrconf_verify_work(struct work_struct *);
|
||||
|
||||
static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0);
|
||||
static DEFINE_SPINLOCK(addrconf_verify_lock);
|
||||
static struct workqueue_struct *addrconf_wq;
|
||||
static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
|
||||
|
||||
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
|
||||
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
|
||||
|
@ -151,7 +153,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
|
|||
u32 flags, u32 noflags);
|
||||
|
||||
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
|
||||
static void addrconf_dad_timer(unsigned long data);
|
||||
static void addrconf_dad_work(struct work_struct *w);
|
||||
static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
|
||||
static void addrconf_dad_run(struct inet6_dev *idev);
|
||||
static void addrconf_rs_timer(unsigned long data);
|
||||
|
@ -247,9 +249,9 @@ static void addrconf_del_rs_timer(struct inet6_dev *idev)
|
|||
__in6_dev_put(idev);
|
||||
}
|
||||
|
||||
static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp)
|
||||
static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
|
||||
{
|
||||
if (del_timer(&ifp->dad_timer))
|
||||
if (cancel_delayed_work(&ifp->dad_work))
|
||||
__in6_ifa_put(ifp);
|
||||
}
|
||||
|
||||
|
@ -261,12 +263,12 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
|
|||
mod_timer(&idev->rs_timer, jiffies + when);
|
||||
}
|
||||
|
||||
static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp,
|
||||
unsigned long when)
|
||||
static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
|
||||
unsigned long delay)
|
||||
{
|
||||
if (!timer_pending(&ifp->dad_timer))
|
||||
if (!delayed_work_pending(&ifp->dad_work))
|
||||
in6_ifa_hold(ifp);
|
||||
mod_timer(&ifp->dad_timer, jiffies + when);
|
||||
mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
|
||||
}
|
||||
|
||||
static int snmp6_alloc_dev(struct inet6_dev *idev)
|
||||
|
@ -751,8 +753,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
|
|||
|
||||
in6_dev_put(ifp->idev);
|
||||
|
||||
if (del_timer(&ifp->dad_timer))
|
||||
pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
|
||||
if (cancel_delayed_work(&ifp->dad_work))
|
||||
pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
|
||||
ifp);
|
||||
|
||||
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
|
||||
pr_warn("Freeing alive inet6 address %p\n", ifp);
|
||||
|
@ -849,8 +852,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
|
|||
|
||||
spin_lock_init(&ifa->lock);
|
||||
spin_lock_init(&ifa->state_lock);
|
||||
setup_timer(&ifa->dad_timer, addrconf_dad_timer,
|
||||
(unsigned long)ifa);
|
||||
INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
|
||||
INIT_HLIST_NODE(&ifa->addr_lst);
|
||||
ifa->scope = scope;
|
||||
ifa->prefix_len = pfxlen;
|
||||
|
@ -990,6 +992,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
|
|||
enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
|
||||
unsigned long expires;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
spin_lock_bh(&ifp->state_lock);
|
||||
state = ifp->state;
|
||||
ifp->state = INET6_IFADDR_STATE_DEAD;
|
||||
|
@ -1021,7 +1025,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
|
|||
|
||||
write_unlock_bh(&ifp->idev->lock);
|
||||
|
||||
addrconf_del_dad_timer(ifp);
|
||||
addrconf_del_dad_work(ifp);
|
||||
|
||||
ipv6_ifa_notify(RTM_DELADDR, ifp);
|
||||
|
||||
|
@ -1604,7 +1608,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
|
|||
{
|
||||
if (ifp->flags&IFA_F_PERMANENT) {
|
||||
spin_lock_bh(&ifp->lock);
|
||||
addrconf_del_dad_timer(ifp);
|
||||
addrconf_del_dad_work(ifp);
|
||||
ifp->flags |= IFA_F_TENTATIVE;
|
||||
if (dad_failed)
|
||||
ifp->flags |= IFA_F_DADFAILED;
|
||||
|
@ -1625,20 +1629,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
|
|||
spin_unlock_bh(&ifp->lock);
|
||||
}
|
||||
ipv6_del_addr(ifp);
|
||||
} else
|
||||
} else {
|
||||
ipv6_del_addr(ifp);
|
||||
}
|
||||
}
|
||||
|
||||
static int addrconf_dad_end(struct inet6_ifaddr *ifp)
|
||||
{
|
||||
int err = -ENOENT;
|
||||
|
||||
spin_lock(&ifp->state_lock);
|
||||
spin_lock_bh(&ifp->state_lock);
|
||||
if (ifp->state == INET6_IFADDR_STATE_DAD) {
|
||||
ifp->state = INET6_IFADDR_STATE_POSTDAD;
|
||||
err = 0;
|
||||
}
|
||||
spin_unlock(&ifp->state_lock);
|
||||
spin_unlock_bh(&ifp->state_lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -1671,7 +1676,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
|
|||
}
|
||||
}
|
||||
|
||||
addrconf_dad_stop(ifp, 1);
|
||||
spin_lock_bh(&ifp->state_lock);
|
||||
/* transition from _POSTDAD to _ERRDAD */
|
||||
ifp->state = INET6_IFADDR_STATE_ERRDAD;
|
||||
spin_unlock_bh(&ifp->state_lock);
|
||||
|
||||
addrconf_mod_dad_work(ifp, 0);
|
||||
}
|
||||
|
||||
/* Join to solicited addr multicast group. */
|
||||
|
@ -1680,6 +1690,8 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
|
|||
{
|
||||
struct in6_addr maddr;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
|
||||
return;
|
||||
|
||||
|
@ -1691,6 +1703,8 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
|
|||
{
|
||||
struct in6_addr maddr;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
|
||||
return;
|
||||
|
||||
|
@ -1701,6 +1715,9 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
|
|||
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
|
||||
{
|
||||
struct in6_addr addr;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
if (ifp->prefix_len >= 127) /* RFC 6164 */
|
||||
return;
|
||||
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
|
||||
|
@ -1712,6 +1729,9 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
|
|||
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
|
||||
{
|
||||
struct in6_addr addr;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
if (ifp->prefix_len >= 127) /* RFC 6164 */
|
||||
return;
|
||||
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
|
||||
|
@ -2271,11 +2291,13 @@ ok:
|
|||
return;
|
||||
}
|
||||
|
||||
ifp->flags |= IFA_F_MANAGETEMPADDR;
|
||||
update_lft = 0;
|
||||
create = 1;
|
||||
spin_lock_bh(&ifp->lock);
|
||||
ifp->flags |= IFA_F_MANAGETEMPADDR;
|
||||
ifp->cstamp = jiffies;
|
||||
ifp->tokenized = tokenized;
|
||||
spin_unlock_bh(&ifp->lock);
|
||||
addrconf_dad_start(ifp);
|
||||
}
|
||||
|
||||
|
@ -2326,7 +2348,7 @@ ok:
|
|||
create, now);
|
||||
|
||||
in6_ifa_put(ifp);
|
||||
addrconf_verify(0);
|
||||
addrconf_verify();
|
||||
}
|
||||
}
|
||||
inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
|
||||
|
@ -2475,7 +2497,7 @@ static int inet6_addr_add(struct net *net, int ifindex,
|
|||
manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
|
||||
true, jiffies);
|
||||
in6_ifa_put(ifp);
|
||||
addrconf_verify(0);
|
||||
addrconf_verify_rtnl();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3011,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
|
|||
hlist_for_each_entry_rcu(ifa, h, addr_lst) {
|
||||
if (ifa->idev == idev) {
|
||||
hlist_del_init_rcu(&ifa->addr_lst);
|
||||
addrconf_del_dad_timer(ifa);
|
||||
addrconf_del_dad_work(ifa);
|
||||
goto restart;
|
||||
}
|
||||
}
|
||||
|
@ -3049,7 +3071,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
|
|||
while (!list_empty(&idev->addr_list)) {
|
||||
ifa = list_first_entry(&idev->addr_list,
|
||||
struct inet6_ifaddr, if_list);
|
||||
addrconf_del_dad_timer(ifa);
|
||||
addrconf_del_dad_work(ifa);
|
||||
|
||||
list_del(&ifa->if_list);
|
||||
|
||||
|
@ -3148,10 +3170,10 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
|
|||
rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
|
||||
|
||||
ifp->dad_probes = idev->cnf.dad_transmits;
|
||||
addrconf_mod_dad_timer(ifp, rand_num);
|
||||
addrconf_mod_dad_work(ifp, rand_num);
|
||||
}
|
||||
|
||||
static void addrconf_dad_start(struct inet6_ifaddr *ifp)
|
||||
static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
|
||||
{
|
||||
struct inet6_dev *idev = ifp->idev;
|
||||
struct net_device *dev = idev->dev;
|
||||
|
@ -3203,25 +3225,68 @@ out:
|
|||
read_unlock_bh(&idev->lock);
|
||||
}
|
||||
|
||||
static void addrconf_dad_timer(unsigned long data)
|
||||
static void addrconf_dad_start(struct inet6_ifaddr *ifp)
|
||||
{
|
||||
struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
|
||||
bool begin_dad = false;
|
||||
|
||||
spin_lock_bh(&ifp->state_lock);
|
||||
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
|
||||
ifp->state = INET6_IFADDR_STATE_PREDAD;
|
||||
begin_dad = true;
|
||||
}
|
||||
spin_unlock_bh(&ifp->state_lock);
|
||||
|
||||
if (begin_dad)
|
||||
addrconf_mod_dad_work(ifp, 0);
|
||||
}
|
||||
|
||||
static void addrconf_dad_work(struct work_struct *w)
|
||||
{
|
||||
struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
|
||||
struct inet6_ifaddr,
|
||||
dad_work);
|
||||
struct inet6_dev *idev = ifp->idev;
|
||||
struct in6_addr mcaddr;
|
||||
|
||||
enum {
|
||||
DAD_PROCESS,
|
||||
DAD_BEGIN,
|
||||
DAD_ABORT,
|
||||
} action = DAD_PROCESS;
|
||||
|
||||
rtnl_lock();
|
||||
|
||||
spin_lock_bh(&ifp->state_lock);
|
||||
if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
|
||||
action = DAD_BEGIN;
|
||||
ifp->state = INET6_IFADDR_STATE_DAD;
|
||||
} else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
|
||||
action = DAD_ABORT;
|
||||
ifp->state = INET6_IFADDR_STATE_POSTDAD;
|
||||
}
|
||||
spin_unlock_bh(&ifp->state_lock);
|
||||
|
||||
if (action == DAD_BEGIN) {
|
||||
addrconf_dad_begin(ifp);
|
||||
goto out;
|
||||
} else if (action == DAD_ABORT) {
|
||||
addrconf_dad_stop(ifp, 1);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!ifp->dad_probes && addrconf_dad_end(ifp))
|
||||
goto out;
|
||||
|
||||
write_lock(&idev->lock);
|
||||
write_lock_bh(&idev->lock);
|
||||
if (idev->dead || !(idev->if_flags & IF_READY)) {
|
||||
write_unlock(&idev->lock);
|
||||
write_unlock_bh(&idev->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
spin_lock(&ifp->lock);
|
||||
if (ifp->state == INET6_IFADDR_STATE_DEAD) {
|
||||
spin_unlock(&ifp->lock);
|
||||
write_unlock(&idev->lock);
|
||||
write_unlock_bh(&idev->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -3232,7 +3297,7 @@ static void addrconf_dad_timer(unsigned long data)
|
|||
|
||||
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
|
||||
spin_unlock(&ifp->lock);
|
||||
write_unlock(&idev->lock);
|
||||
write_unlock_bh(&idev->lock);
|
||||
|
||||
addrconf_dad_completed(ifp);
|
||||
|
||||
|
@ -3240,16 +3305,17 @@ static void addrconf_dad_timer(unsigned long data)
|
|||
}
|
||||
|
||||
ifp->dad_probes--;
|
||||
addrconf_mod_dad_timer(ifp,
|
||||
NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
|
||||
addrconf_mod_dad_work(ifp,
|
||||
NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
|
||||
spin_unlock(&ifp->lock);
|
||||
write_unlock(&idev->lock);
|
||||
write_unlock_bh(&idev->lock);
|
||||
|
||||
/* send a neighbour solicitation for our addr */
|
||||
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
|
||||
ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
|
||||
out:
|
||||
in6_ifa_put(ifp);
|
||||
rtnl_unlock();
|
||||
}
 /* ifp->idev must be at least read locked */

@@ -3276,7 +3342,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
         struct in6_addr lladdr;
         bool send_rs, send_mld;

-        addrconf_del_dad_timer(ifp);
+        addrconf_del_dad_work(ifp);

         /*
          *      Configure the address for reception. Now it is valid.

@@ -3517,23 +3583,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
  *      Periodic address status verification
  */

-static void addrconf_verify(unsigned long foo)
+static void addrconf_verify_rtnl(void)
 {
         unsigned long now, next, next_sec, next_sched;
         struct inet6_ifaddr *ifp;
         int i;

+        ASSERT_RTNL();
+
         rcu_read_lock_bh();
-        spin_lock(&addrconf_verify_lock);
         now = jiffies;
         next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);

-        del_timer(&addr_chk_timer);
+        cancel_delayed_work(&addr_chk_work);

         for (i = 0; i < IN6_ADDR_HSIZE; i++) {
 restart:
-                hlist_for_each_entry_rcu_bh(ifp,
-                                            &inet6_addr_lst[i], addr_lst) {
+                hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
                         unsigned long age;

                         /* When setting preferred_lft to a value not zero or

@@ -3628,13 +3694,22 @@ restart:

         ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
              now, next, next_sec, next_sched);

-        addr_chk_timer.expires = next_sched;
-        add_timer(&addr_chk_timer);
-        spin_unlock(&addrconf_verify_lock);
+        mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
         rcu_read_unlock_bh();
 }

+static void addrconf_verify_work(struct work_struct *w)
+{
+        rtnl_lock();
+        addrconf_verify_rtnl();
+        rtnl_unlock();
+}
+
+static void addrconf_verify(void)
+{
+        mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
+}
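
The three functions above split the verification path cleanly: addrconf_verify_rtnl() performs the actual address scan and asserts that RTNL is held, addrconf_verify_work() is the workqueue callback that acquires RTNL itself, and addrconf_verify() merely kicks the work with zero delay so callers that do not hold RTNL cannot deadlock. For this to link, the patch presumably declares the queue and work item roughly as follows (reconstructed; the declarations sit outside this excerpt):

    static struct workqueue_struct *addrconf_wq;
    static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);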

 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
                                      struct in6_addr **peer_pfx)
 {

@@ -3691,6 +3766,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
         bool was_managetempaddr;
         bool had_prefixroute;

+        ASSERT_RTNL();
+
         if (!valid_lft || (prefered_lft > valid_lft))
                 return -EINVAL;

@@ -3756,7 +3833,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
                                  !was_managetempaddr, jiffies);
         }

-        addrconf_verify(0);
+        addrconf_verify_rtnl();

         return 0;
 }

@@ -4386,6 +4463,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
         bool update_rs = false;
         struct in6_addr ll_addr;

+        ASSERT_RTNL();
+
         if (token == NULL)
                 return -EINVAL;
         if (ipv6_addr_any(token))

@@ -4434,7 +4513,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
         }

         write_unlock_bh(&idev->lock);
-        addrconf_verify(0);
+        addrconf_verify_rtnl();
         return 0;
 }

@@ -4636,6 +4715,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 {
         struct net *net = dev_net(ifp->idev->dev);

+        if (event)
+                ASSERT_RTNL();
+
         inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);

         switch (event) {

@@ -5244,6 +5326,12 @@ int __init addrconf_init(void)
         if (err < 0)
                 goto out_addrlabel;

+        addrconf_wq = create_workqueue("ipv6_addrconf");
+        if (!addrconf_wq) {
+                err = -ENOMEM;
+                goto out_nowq;
+        }
+
         /* The addrconf netdev notifier requires that loopback_dev
          * has it's ipv6 private information allocated and setup
          * before it can bring up and give link-local addresses

@@ -5274,7 +5362,7 @@ int __init addrconf_init(void)

         register_netdevice_notifier(&ipv6_dev_notf);

-        addrconf_verify(0);
+        addrconf_verify();

         rtnl_af_register(&inet6_ops);

@@ -5302,6 +5390,8 @@ errout:
         rtnl_af_unregister(&inet6_ops);
         unregister_netdevice_notifier(&ipv6_dev_notf);
 errlo:
+        destroy_workqueue(addrconf_wq);
+out_nowq:
         unregister_pernet_subsys(&addrconf_ops);
 out_addrlabel:
         ipv6_addr_label_cleanup();

@@ -5337,7 +5427,8 @@ void addrconf_cleanup(void)
         for (i = 0; i < IN6_ADDR_HSIZE; i++)
                 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
         spin_unlock_bh(&addrconf_hash_lock);

-        del_timer(&addr_chk_timer);
+        cancel_delayed_work(&addr_chk_work);
         rtnl_unlock();

+        destroy_workqueue(addrconf_wq);
 }
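
The addrconf_init() and error-path hunks above thread a new out_nowq label into the existing goto-unwind chain: each setup step that can fail jumps to a label that undoes only what was already completed, in reverse order, and addrconf_cleanup() tears the workqueue down last. A generic sketch of the idiom under hypothetical names (register_thing/unregister_thing are placeholders, not kernel APIs):

    static struct workqueue_struct *wq;

    static int __init example_init(void)
    {
            int err;

            err = register_thing();           /* hypothetical step 1 */
            if (err)
                    return err;

            wq = create_workqueue("example"); /* step 2 */
            if (!wq) {
                    err = -ENOMEM;
                    goto out_unregister;      /* undo step 1 only */
            }
            return 0;

    out_unregister:
            unregister_thing();               /* hypothetical */
            return err;
    }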

@@ -354,13 +354,16 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,

         skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
                                   GFP_ATOMIC);
-        if (!skb)
+        if (!skb) {
+                skb_tx_error(entskb);
                 return NULL;
+        }

         nlh = nlmsg_put(skb, 0, 0,
                         NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                         sizeof(struct nfgenmsg), 0);
         if (!nlh) {
+                skb_tx_error(entskb);
                 kfree_skb(skb);
                 return NULL;
         }

@@ -488,13 +491,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                 nla->nla_type = NFQA_PAYLOAD;
                 nla->nla_len = nla_attr_size(data_len);

-                skb_zerocopy(skb, entskb, data_len, hlen);
+                if (skb_zerocopy(skb, entskb, data_len, hlen))
+                        goto nla_put_failure;
         }

         nlh->nlmsg_len = skb->len;
         return skb;

 nla_put_failure:
+        skb_tx_error(entskb);
         kfree_skb(skb);
         net_err_ratelimited("nf_queue: error creating packet message\n");
         return NULL;
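
Both skb_zerocopy() call sites in this merge adopt the same contract change: skb_zerocopy() now returns an error rather than void (it can fail once it has to orphan the source skb's frags), and every failure path from then on must call skb_tx_error() on the source skb so the zerocopy originator learns its pages are no longer in flight. The caller-side shape, as a sketch (to, from, data_len and hlen are illustrative variables):

    /* 'to' is the message being built, 'from' the packet being mirrored */
    if (skb_zerocopy(to, from, data_len, hlen)) {
            skb_tx_error(from);     /* notify the zerocopy owner */
            kfree_skb(to);          /* discard the half-built message */
            return NULL;
    }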

@@ -464,7 +464,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
         }
         nla->nla_len = nla_attr_size(skb->len);

-        skb_zerocopy(user_skb, skb, skb->len, hlen);
+        err = skb_zerocopy(user_skb, skb, skb->len, hlen);
+        if (err)
+                goto out;

         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {

@@ -478,6 +480,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,

         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
 out:
+        if (err)
+                skb_tx_error(skb);
         kfree_skb(nskb);
         return err;
 }

@@ -103,30 +103,24 @@ static void stats_read(struct flow_stats *stats,
 void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
                         unsigned long *used, __be16 *tcp_flags)
 {
-        int cpu, cur_cpu;
+        int cpu;

         *used = 0;
         *tcp_flags = 0;
         memset(ovs_stats, 0, sizeof(*ovs_stats));

+        local_bh_disable();
         if (!flow->stats.is_percpu) {
                 stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
         } else {
-                cur_cpu = get_cpu();
                 for_each_possible_cpu(cpu) {
                         struct flow_stats *stats;

-                        if (cpu == cur_cpu)
-                                local_bh_disable();
-
                         stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
                         stats_read(stats, ovs_stats, used, tcp_flags);
-
-                        if (cpu == cur_cpu)
-                                local_bh_enable();
                 }
-                put_cpu();
         }
+        local_bh_enable();
 }

 static void stats_reset(struct flow_stats *stats)

@@ -141,25 +135,17 @@ static void stats_reset(struct flow_stats *stats)

 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-        int cpu, cur_cpu;
+        int cpu;

+        local_bh_disable();
         if (!flow->stats.is_percpu) {
                 stats_reset(flow->stats.stat);
         } else {
-                cur_cpu = get_cpu();
-
                 for_each_possible_cpu(cpu) {
-
-                        if (cpu == cur_cpu)
-                                local_bh_disable();
-
                         stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
-
-                        if (cpu == cur_cpu)
-                                local_bh_enable();
                 }
-                put_cpu();
         }
+        local_bh_enable();
 }
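
The two flow-stats hunks above replace the per-iteration get_cpu()/local_bh_disable() dance with a single local_bh_disable() around the whole walk: bottom halves only ever race with this code on the local CPU, and disabling them also disables preemption, which makes the explicit get_cpu()/put_cpu() pinning redundant. The resulting loop shape, sketched with an illustrative accumulator:

    local_bh_disable();     /* excludes local softirq updates, pins the CPU */
    for_each_possible_cpu(cpu) {
            const struct flow_stats *s = per_cpu_ptr(cpu_stats, cpu);
            packets += s->packet_count;     /* illustrative field read */
    }
    local_bh_enable();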

 static int check_header(struct sk_buff *skb, int len)

@@ -1787,8 +1787,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
                 goto out;

         err = mutex_lock_interruptible(&u->readlock);
-        if (err) {
-                err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+        if (unlikely(err)) {
+                /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+                 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+                 */
+                err = noblock ? -EAGAIN : -ERESTARTSYS;
                 goto out;
         }

@@ -1913,6 +1916,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
         struct unix_sock *u = unix_sk(sk);
         DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
         int copied = 0;
+        int noblock = flags & MSG_DONTWAIT;
         int check_creds = 0;
         int target;
         int err = 0;

@@ -1928,7 +1932,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                 goto out;

         target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
-        timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+        timeo = sock_rcvtimeo(sk, noblock);

         /* Lock the socket to prevent queue disordering
          * while sleeps in memcpy_tomsg

@@ -1940,8 +1944,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
         }

         err = mutex_lock_interruptible(&u->readlock);
-        if (err) {
-                err = sock_intr_errno(timeo);
+        if (unlikely(err)) {
+                /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+                 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+                 */
+                err = noblock ? -EAGAIN : -ERESTARTSYS;
                 goto out;
         }
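
The two af_unix hunks above fix the error code a non-blocking reader sees when a signal interrupts it: mutex_lock_interruptible() returns -EINTR, and the old code laundered that through sock_intr_errno(), which for reference is defined in include/net/sock.h as:

    static inline int sock_intr_errno(long timeo)
    {
            return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
    }

Because mutex_lock_interruptible() never honors sk_rcvtimeo, a MSG_DONTWAIT reader could end up with -EINTR where userspace expects -EAGAIN; the new code maps the failure directly, -EAGAIN for non-blocking callers and -ERESTARTSYS otherwise.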