Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/benet/be_main.c
Author:	David S. Miller
Date:	2011-04-07 14:05:23 -07:00
Commit:	c1e48efc70

49 changed files with 273 additions and 161 deletions

@@ -6916,6 +6916,13 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:	Maintained
 F:	drivers/platform/x86
 
+XEN NETWORK BACKEND DRIVER
+M:	Ian Campbell <ian.campbell@citrix.com>
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	drivers/net/xen-netback/*
+
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xensource.com (moderated for non-subscribers)

@@ -155,7 +155,7 @@ struct be_eq_obj {
 	u16 min_eqd;		/* in usecs */
 	u16 max_eqd;		/* in usecs */
 	u16 cur_eqd;		/* in usecs */
-	u8 msix_vec_idx;
+	u8 eq_idx;
 
 	struct napi_struct napi;
 };
@@ -292,7 +292,7 @@ struct be_adapter {
 	u32 num_rx_qs;
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
-	u8 msix_vec_next_idx;
+	u8 eq_next_idx;
 	struct be_drv_stats drv_stats;
 
 	struct vlan_group *vlan_grp;

@@ -1509,7 +1509,7 @@ static int be_tx_queues_create(struct be_adapter *adapter)
 	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
 		goto tx_eq_free;
 
-	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
 
 	/* Alloc TX eth compl queue */
@@ -1621,7 +1621,7 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 		if (rc)
 			goto err;
 
-		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
 
 		/* CQ */
 		cq = &rxo->cq;
@@ -1697,11 +1697,11 @@ static irqreturn_t be_intx(int irq, void *dev)
 		if (!isr)
 			return IRQ_NONE;
 
-		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+		if ((1 << adapter->tx_eq.eq_idx & isr))
 			event_handle(adapter, &adapter->tx_eq);
 
 		for_all_rx_queues(adapter, rxo, i) {
-			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+			if ((1 << rxo->rx_eq.eq_idx & isr))
 				event_handle(adapter, &rxo->rx_eq);
 		}
 	}
@@ -1964,7 +1964,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
 static inline int be_msix_vec_get(struct be_adapter *adapter,
 			struct be_eq_obj *eq_obj)
 {
-	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
+	return adapter->msix_entries[eq_obj->eq_idx].vector;
 }
 
 static int be_request_irq(struct be_adapter *adapter,
@@ -2356,6 +2356,7 @@ static int be_clear(struct be_adapter *adapter)
 	be_mcc_queues_destroy(adapter);
 	be_rx_queues_destroy(adapter);
 	be_tx_queues_destroy(adapter);
+	adapter->eq_next_idx = 0;
 
 	if (be_physfn(adapter) && adapter->sriov_enabled)
 		for (vf = 0; vf < num_vfs; vf++)
@@ -3152,11 +3153,13 @@ static int be_resume(struct pci_dev *pdev)
 static void be_shutdown(struct pci_dev *pdev)
 {
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
+
+	if (!adapter)
+		return;
 
 	cancel_delayed_work_sync(&adapter->work);
 
-	netif_device_detach(netdev);
+	netif_device_detach(adapter->netdev);
 
 	be_cmd_reset_function(adapter);

@@ -2219,13 +2219,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
 static void
 bfa_ioc_recover(struct bfa_ioc *ioc)
 {
-	u16 bdf;
-
-	bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
-	       ioc->pcidev.device_id);
-
-	pr_crit("Firmware heartbeat failure at %d", bdf);
-	BUG_ON(1);
+	pr_crit("Heart Beat of IOC has failed\n");
+	bfa_ioc_stats(ioc, ioc_hbfails);
+	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
 static void

@@ -931,7 +931,8 @@ static int mcp251x_open(struct net_device *net)
 	priv->tx_len = 0;
 
 	ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-				   IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+		  pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+		  DEVICE_NAME, priv);
 	if (ret) {
 		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
 		if (pdata->transceiver_enable)

@@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev)
 			lock_sock(sk);
 
 			if (po->pppoe_dev == dev &&
-			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
 				pppox_unbind_sock(sk);
 				sk->sk_state = PPPOX_ZOMBIE;
 				sk->sk_state_change(sk);

@@ -1818,6 +1818,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
 		SMSC_TRACE(pdata, probe, "PHY will be autodetected.");
 
 	spin_lock_init(&pdata->dev_lock);
+	spin_lock_init(&pdata->mac_lock);
 
 	if (pdata->ioaddr == 0) {
 		SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
@@ -1897,8 +1898,11 @@ static int __devinit smsc911x_init(struct net_device *dev)
 	/* workaround for platforms without an eeprom, where the mac address
 	 * is stored elsewhere and set by the bootloader.  This saves the
 	 * mac address before resetting the device */
-	if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS)
+	if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+		spin_lock_irq(&pdata->mac_lock);
 		smsc911x_read_mac_address(dev);
+		spin_unlock_irq(&pdata->mac_lock);
+	}
 
 	/* Reset the LAN911x */
 	if (smsc911x_soft_reset(pdata))
@@ -2058,8 +2062,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 			 "Network interface: \"%s\"", dev->name);
 	}
 
-	spin_lock_init(&pdata->mac_lock);
-
 	retval = smsc911x_mii_init(pdev, dev);
 	if (retval) {
 		SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);

@@ -2546,6 +2546,7 @@ static struct {
 	{ AR_SREV_VERSION_9287,         "9287" },
 	{ AR_SREV_VERSION_9271,         "9271" },
 	{ AR_SREV_VERSION_9300,         "9300" },
+	{ AR_SREV_VERSION_9485,         "9485" },
 };
 
 /* For devices with external radios */

@@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
 		dmaaddr = meta->dmaaddr;
 		goto drop_recycle_buffer;
 	}
-	if (unlikely(len > ring->rx_buffersize)) {
+	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
 		/* The data did not fit into one descriptor buffer
 		 * and is split over multiple buffers.
 		 * This should never happen, as we try to allocate buffers

@@ -163,7 +163,7 @@ struct b43_dmadesc_generic {
 /* DMA engine tuning knobs */
 #define B43_TXRING_SLOTS		256
 #define B43_RXRING_SLOTS		64
-#define B43_DMA0_RX_BUFFERSIZE		IEEE80211_MAX_FRAME_LEN
+#define B43_DMA0_RX_BUFFERSIZE		(B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN)
 
 /* Pointer poison */
 #define B43_DMA_PTR_POISON		((void *)ERR_PTR(-ENOMEM))

@@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr {
 
 /* 6x00 Specific */
 #define EEPROM_6000_TX_POWER_VERSION    (4)
-#define EEPROM_6000_EEPROM_VERSION	(0x434)
+#define EEPROM_6000_EEPROM_VERSION	(0x423)
 
 /* 6x50 Specific */
 #define EEPROM_6050_TX_POWER_VERSION    (4)

@@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 	{USB_DEVICE(0x0846, 0x4210)},	/* Netgear WG121 the second ? */
 	{USB_DEVICE(0x0846, 0x4220)},	/* Netgear WG111 */
 	{USB_DEVICE(0x09aa, 0x1000)},	/* Spinnaker Proto board */
+	{USB_DEVICE(0x0bf8, 0x1007)},	/* Fujitsu E-5400 USB */
 	{USB_DEVICE(0x0cde, 0x0006)},	/* Medion 40900, Roper Europe */
 	{USB_DEVICE(0x0db0, 0x6826)},	/* MSI UB54G (MS-6826) */
 	{USB_DEVICE(0x107b, 0x55f2)},	/* Gateway WGU-210 (Gemtek) */
@@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 	{USB_DEVICE(0x1915, 0x2235)},	/* Linksys WUSB54G Portable OEM */
 	{USB_DEVICE(0x2001, 0x3701)},	/* DLink DWL-G120 Spinnaker */
 	{USB_DEVICE(0x2001, 0x3703)},	/* DLink DWL-G122 */
+	{USB_DEVICE(0x2001, 0x3762)},	/* Conceptronic C54U */
 	{USB_DEVICE(0x5041, 0x2234)},	/* Linksys WUSB54G */
 	{USB_DEVICE(0x5041, 0x2235)},	/* Linksys WUSB54G Portable */

@@ -1062,8 +1062,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
 	 * Stop all work.
 	 */
 	cancel_work_sync(&rt2x00dev->intf_work);
-	cancel_work_sync(&rt2x00dev->rxdone_work);
-	cancel_work_sync(&rt2x00dev->txdone_work);
+	if (rt2x00_is_usb(rt2x00dev)) {
+		cancel_work_sync(&rt2x00dev->rxdone_work);
+		cancel_work_sync(&rt2x00dev->txdone_work);
+	}
 	destroy_workqueue(rt2x00dev->workqueue);
 
 	/*

@@ -685,7 +685,7 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
 
 	u8 efuse_data, word_cnts = 0;
 	u16 efuse_addr = 0;
-	u8 hworden;
+	u8 hworden = 0;
 	u8 tmpdata[8];
 
 	if (data == NULL)

@@ -303,7 +303,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
 	u16 box_reg, box_extreg;
 	u8 u1b_tmp;
 	bool isfw_read = false;
-	u8 buf_index;
+	u8 buf_index = 0;
 	bool bwrite_sucess = false;
 	u8 wait_h2c_limmit = 100;
 	u8 wait_writeh2c_limmit = 100;

@@ -246,7 +246,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
 
 static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
 {
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw);
 
 	mutex_destroy(&rtlpriv->io.bb_mutex);
 }

@@ -340,7 +340,7 @@ module_init(wl1271_init);
 module_exit(wl1271_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
 MODULE_FIRMWARE(WL1271_AP_FW_NAME);

@@ -487,7 +487,7 @@ module_init(wl1271_init);
 module_exit(wl1271_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
 MODULE_FIRMWARE(WL1271_AP_FW_NAME);

@@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
 
 	kfree(wl->nvs);
 
-	wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+	if (len != sizeof(struct wl1271_nvs_file))
+		return -EINVAL;
+
+	wl->nvs = kzalloc(len, GFP_KERNEL);
 	if (!wl->nvs) {
 		wl1271_error("could not allocate memory for the nvs file");
 		ret = -ENOMEM;

@@ -643,7 +643,7 @@ static void rx_urb_complete(struct urb *urb)
 	usb = urb->context;
 	rx = &usb->rx;
 
-	zd_usb_reset_rx_idle_timer(usb);
+	tasklet_schedule(&rx->reset_timer_tasklet);
 
 	if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
 		/* If there is an old first fragment, we don't care. */
@@ -812,6 +812,7 @@ void zd_usb_disable_rx(struct zd_usb *usb)
 	__zd_usb_disable_rx(usb);
 	mutex_unlock(&rx->setup_mutex);
 
+	tasklet_kill(&rx->reset_timer_tasklet);
 	cancel_delayed_work_sync(&rx->idle_work);
 }
 
@@ -1106,6 +1107,13 @@ static void zd_rx_idle_timer_handler(struct work_struct *work)
 	zd_usb_reset_rx(usb);
 }
 
+static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param)
+{
+	struct zd_usb *usb = (struct zd_usb *)param;
+
+	zd_usb_reset_rx_idle_timer(usb);
+}
+
 void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
 {
 	struct zd_usb_rx *rx = &usb->rx;
@@ -1127,6 +1135,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
 static inline void init_usb_rx(struct zd_usb *usb)
 {
 	struct zd_usb_rx *rx = &usb->rx;
+
 	spin_lock_init(&rx->lock);
 	mutex_init(&rx->setup_mutex);
 	if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
@@ -1136,11 +1145,14 @@ static inline void init_usb_rx(struct zd_usb *usb)
 	}
 	ZD_ASSERT(rx->fragment_length == 0);
 	INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
+	rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet;
+	rx->reset_timer_tasklet.data = (unsigned long)usb;
 }
 
 static inline void init_usb_tx(struct zd_usb *usb)
 {
 	struct zd_usb_tx *tx = &usb->tx;
+
 	spin_lock_init(&tx->lock);
 	atomic_set(&tx->enabled, 0);
 	tx->stopped = 0;
@@ -1671,6 +1683,10 @@ static void iowrite16v_urb_complete(struct urb *urb)
 
 	if (urb->status && !usb->cmd_error)
 		usb->cmd_error = urb->status;
+
+	if (!usb->cmd_error &&
+	    urb->actual_length != urb->transfer_buffer_length)
+		usb->cmd_error = -EIO;
 }
 
 static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
@@ -1805,7 +1821,7 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
 	usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
 			 req, req_len, iowrite16v_urb_complete, usb,
 			 ep->desc.bInterval);
-	urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+	urb->transfer_flags |= URB_FREE_BUFFER;
 
 	/* Submit previous URB */
 	r = zd_submit_waiting_urb(usb, false);

@@ -183,6 +183,7 @@ struct zd_usb_rx {
 	spinlock_t lock;
 	struct mutex setup_mutex;
 	struct delayed_work idle_work;
+	struct tasklet_struct reset_timer_tasklet;
 	u8 fragment[2 * USB_MAX_RX_SIZE];
 	unsigned int fragment_length;
 	unsigned int usb_packet_size;

@@ -12,6 +12,7 @@
 /**
  * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
  * @oscillator_frequency:       - oscillator frequency in Hz
+ * @irq_flags:                  - IRQF configuration flags
  * @board_specific_setup:       - called before probing the chip (power,reset)
  * @transceiver_enable:         - called to power on/off the transceiver
  * @power_enable:               - called to power on/off the mcp *and* the
@@ -24,6 +25,7 @@
 
 struct mcp251x_platform_data {
 	unsigned long oscillator_frequency;
+	unsigned long irq_flags;
 	int (*board_specific_setup)(struct spi_device *spi);
 	int (*transceiver_enable)(int enable);
 	int (*power_enable) (int enable);
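Taken together with the mcp251x_open() hunk above, a board can now choose its own IRQ trigger instead of the hard-coded falling edge. A minimal board-file sketch using the new field; the device and flag values are illustrative, not from this commit:

	#include <linux/can/platform/mcp251x.h>
	#include <linux/interrupt.h>

	/* Hypothetical board whose MCP2515 interrupt line is active-low and
	 * level-triggered; leaving .irq_flags zero keeps the old
	 * IRQF_TRIGGER_FALLING fallback in mcp251x_open(). */
	static struct mcp251x_platform_data board_mcp2515_pdata = {
		.oscillator_frequency	= 16000000,	/* 16 MHz crystal */
		.irq_flags		= IRQF_TRIGGER_LOW | IRQF_ONESHOT,
	};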

@@ -270,7 +270,8 @@ struct nf_afinfo {
 				   unsigned int dataoff,
 				   unsigned int len,
 				   u_int8_t protocol);
-	int		(*route)(struct dst_entry **dst, struct flowi *fl);
+	int		(*route)(struct net *net, struct dst_entry **dst,
+				 struct flowi *fl, bool strict);
 	void		(*saveroute)(const struct sk_buff *skb,
 				     struct nf_queue_entry *entry);
 	int		(*reroute)(struct sk_buff *skb,
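Every route() implementation and caller below follows this widened signature. For orientation, a condensed caller sketch modeled on the xt_TCPMSS hunk further down; the function name and trimmed error handling are illustrative:

	/* Sketch: resolve a dst through the per-family afinfo hook.
	 * nf_get_afinfo() must be called under rcu_read_lock();
	 * strict=false preserves the old, non-strict lookup. */
	static struct dst_entry *nf_route_sketch(struct net *net,
						 struct flowi *fl, u_int8_t family)
	{
		const struct nf_afinfo *ai;
		struct dst_entry *dst = NULL;

		rcu_read_lock();
		ai = nf_get_afinfo(family);
		if (ai != NULL)
			ai->route(net, &dst, fl, false);
		rcu_read_unlock();

		return dst;	/* NULL if no afinfo was registered or lookup failed */
	}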

@@ -293,7 +293,7 @@ struct ip_set {
 	/* Lock protecting the set data */
 	rwlock_t lock;
 	/* References to the set */
-	atomic_t ref;
+	u32 ref;
 	/* The core set type */
 	struct ip_set_type *type;
 	/* The type variant doing the real job */

@@ -515,8 +515,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
 	if (h->netmask != HOST_MASK)
 		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
 #endif
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
 	if (with_timeout(h->timeout))
 		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));

@@ -52,7 +52,7 @@ static inline struct net *skb_net(const struct sk_buff *skb)
 	 */
 	if (likely(skb->dev && skb->dev->nd_net))
 		return dev_net(skb->dev);
-	if (skb_dst(skb)->dev)
+	if (skb_dst(skb) && skb_dst(skb)->dev)
 		return dev_net(skb_dst(skb)->dev);
 	WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
 		      __func__, __LINE__);

@@ -1753,8 +1753,19 @@ enum ieee80211_ampdu_mlme_action {
  *	that TX/RX_STOP can pass NULL for this parameter.
  * The @buf_size parameter is only valid when the action is set to
  * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
- * buffer size (number of subframes) for this session -- aggregates
- * containing more subframes than this may not be transmitted to the peer.
+ * buffer size (number of subframes) for this session -- the driver
+ * may neither send aggregates containing more subframes than this
+ * nor send aggregates in a way that lost frames would exceed the
+ * buffer size. If just limiting the aggregate size, this would be
+ * possible with a buf_size of 8:
+ *  - TX: 1.....7
+ *  - RX:  2....7 (lost frame #1)
+ *  - TX:        8..1...
+ * which is invalid since #1 was now re-transmitted well past the
+ * buffer size of 8. Correct ways to retransmit #1 would be:
+ *  - TX:       1 or 18 or 81
+ * Even "189" would be wrong since 1 could be lost again.
+ *
  * Returns a negative error code on failure.
  * The callback can sleep.
  *

@@ -64,6 +64,7 @@ struct rtable {
 	__be32			rt_dst;	/* Path destination	*/
 	__be32			rt_src;	/* Path source		*/
 
+	int			rt_route_iif;
 	int			rt_iif;
 	int			rt_oif;
 	__u32			rt_mark;
@@ -80,12 +81,12 @@ struct rtable {
 
 static inline bool rt_is_input_route(struct rtable *rt)
 {
-	return rt->rt_iif != 0;
+	return rt->rt_route_iif != 0;
 }
 
 static inline bool rt_is_output_route(struct rtable *rt)
 {
-	return rt->rt_iif == 0;
+	return rt->rt_route_iif == 0;
 }
 
 struct ip_rt_acct {
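Spelled out, the invariant this split establishes (summarized from the route.c hunks below; the helper name is illustrative):

	/* After this change the two fields diverge on the output path:
	 *
	 *   input routes:  rt_route_iif == rt_iif == receiving ifindex
	 *   output routes: rt_route_iif == 0,
	 *                  rt_iif == flowi4_oif ?: output device ifindex
	 *
	 * so direction tests must key off rt_route_iif; a non-zero rt_iif
	 * no longer implies an input route. */
	static inline bool rt_came_from_wire(const struct rtable *rt)
	{
		return rt->rt_route_iif != 0;	/* same test as rt_is_input_route() */
	}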

@@ -14,6 +14,13 @@
 #include "dsa_priv.h"
 #include "mv88e6xxx.h"
 
+/*
+ * Switch product IDs
+ */
+#define ID_6085		0x04a0
+#define ID_6095		0x0950
+#define ID_6131		0x1060
+
 static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
 {
 	int ret;
@@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
 	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
 	if (ret >= 0) {
 		ret &= 0xfff0;
-		if (ret == 0x0950)
+		if (ret == ID_6085)
+			return "Marvell 88E6085";
+		if (ret == ID_6095)
 			return "Marvell 88E6095/88E6095F";
-		if (ret == 0x1060)
+		if (ret == ID_6131)
 			return "Marvell 88E6131";
 	}
 
@@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
 
 static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 {
+	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
 	int addr = REG_PORT(p);
 	u16 val;
 
@@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 	 * MAC Forcing register: don't force link, speed, duplex
 	 * or flow control state to any particular values on physical
 	 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-	 * full duplex.
+	 * (100 Mb/s on 6085) full duplex.
 	 */
 	if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-		REG_WRITE(addr, 0x01, 0x003e);
+		if (ps->id == ID_6085)
+			REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+		else
+			REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
 	else
 		REG_WRITE(addr, 0x01, 0x0003);
 
@@ -286,6 +299,8 @@ static int mv88e6131_setup(struct dsa_switch *ds)
 	mv88e6xxx_ppu_state_init(ds);
 	mutex_init(&ps->stats_mutex);
 
+	ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
 	ret = mv88e6131_switch_reset(ds);
 	if (ret < 0)
 		return ret;

@@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state {
 	 * Hold this mutex over snapshot + dump sequences.
 	 */
 	struct mutex	stats_mutex;
+
+	int		id; /* switch product id */
 };
 
 struct mv88e6xxx_hw_stat {

@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 	return csum;
 }
 
-static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip_route(struct net *net, struct dst_entry **dst,
+		       struct flowi *fl, bool strict __always_unused)
 {
-	struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4);
+	struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
 	if (IS_ERR(rt))
 		return PTR_ERR(rt);
 	*dst = &rt->dst;

@@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	rth->dst.tclassid = itag;
 #endif
+	rth->rt_route_iif = dev->ifindex;
 	rth->rt_iif	= dev->ifindex;
 	rth->dst.dev	= init_net.loopback_dev;
 	dev_hold(rth->dst.dev);
@@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
 	rth->rt_key_src	= saddr;
 	rth->rt_src	= saddr;
 	rth->rt_gateway	= daddr;
+	rth->rt_route_iif = in_dev->dev->ifindex;
 	rth->rt_iif 	= in_dev->dev->ifindex;
 	rth->dst.dev	= (out_dev)->dev;
 	dev_hold(rth->dst.dev);
@@ -2202,6 +2204,7 @@ local_input:
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	rth->dst.tclassid = itag;
 #endif
+	rth->rt_route_iif = dev->ifindex;
 	rth->rt_iif	= dev->ifindex;
 	rth->dst.dev	= net->loopback_dev;
 	dev_hold(rth->dst.dev);
@@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 	rth->rt_mark    = oldflp4->flowi4_mark;
 	rth->rt_dst	= fl4->daddr;
 	rth->rt_src	= fl4->saddr;
-	rth->rt_iif	= 0;
+	rth->rt_route_iif = 0;
+	rth->rt_iif	= oldflp4->flowi4_oif ? : dev_out->ifindex;
 	/* get references to the devices that are to be hold by the routing
 	   cache entry */
 	rth->dst.dev	= dev_out;
@@ -2716,6 +2720,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 		rt->rt_key_dst = ort->rt_key_dst;
 		rt->rt_key_src = ort->rt_key_src;
 		rt->rt_tos = ort->rt_tos;
+		rt->rt_route_iif = ort->rt_route_iif;
 		rt->rt_iif = ort->rt_iif;
 		rt->rt_oif = ort->rt_oif;
 		rt->rt_mark = ort->rt_mark;
@@ -2725,7 +2730,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 		rt->rt_type = ort->rt_type;
 		rt->rt_dst = ort->rt_dst;
 		rt->rt_src = ort->rt_src;
-		rt->rt_iif = ort->rt_iif;
 		rt->rt_gateway = ort->rt_gateway;
 		rt->rt_spec_dst = ort->rt_spec_dst;
 		rt->peer = ort->peer;

@@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	rt->rt_key_dst = fl4->daddr;
 	rt->rt_key_src = fl4->saddr;
 	rt->rt_tos = fl4->flowi4_tos;
+	rt->rt_route_iif = fl4->flowi4_iif;
 	rt->rt_iif = fl4->flowi4_iif;
 	rt->rt_oif = fl4->flowi4_oif;
 	rt->rt_mark = fl4->flowi4_mark;

@@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb,
 	return 0;
 }
 
-static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip6_route(struct net *net, struct dst_entry **dst,
+			struct flowi *fl, bool strict)
 {
-	*dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
+	static const struct ipv6_pinfo fake_pinfo;
+	static const struct inet_sock fake_sk = {
+		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
+		.sk.sk_bound_dev_if = 1,
+		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
+	};
+	const void *sk = strict ? &fake_sk : NULL;
+
+	*dst = ip6_route_output(net, sk, &fl->u.ip6);
 	return (*dst)->error;
 }

@@ -503,6 +503,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
+		dst = NULL;
 		goto done;
 	}
 	skb = tcp_make_synack(sk, dst, req, rvp);
@@ -1621,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		opt_skb = skb_clone(skb, GFP_ATOMIC);
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		sock_rps_save_rxhash(sk, skb->rxhash);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
 		if (opt_skb)
@@ -1648,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 			__kfree_skb(opt_skb);
 			return 0;
 		}
-	}
+	} else
+		sock_rps_save_rxhash(sk, skb->rxhash);
 
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
 		goto reset;

@@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 	int rc;
 	int is_udplite = IS_UDPLITE(sk);
 
+	if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+		sock_rps_save_rxhash(sk, skb->rxhash);
+
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto drop;

@@ -2541,7 +2541,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 		 * same TID from the same station
 		 */
 		rx->skb = skb;
-		rx->flags = 0;
 
 		CALL_RXH(ieee80211_rx_h_decrypt)
 		CALL_RXH(ieee80211_rx_h_check_more_data)
@@ -2612,6 +2611,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 		.sdata = sta->sdata,
 		.local = sta->local,
 		.queue = tid,
+		.flags = 0,
 	};
 	struct tid_ampdu_rx *tid_agg_rx;

@@ -652,7 +652,6 @@ comment "Xtables matches"
 config NETFILTER_XT_MATCH_ADDRTYPE
 	tristate '"addrtype" address type match support'
 	depends on NETFILTER_ADVANCED
-	depends on (IPV6 || IPV6=n)
 	---help---
 	  This option allows you to match what routing thinks of an address,
 	  eg. UNICAST, LOCAL, BROADCAST, ...

@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
 		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
 	if (map->netmask != 32)
 		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))

@@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map)
 			    + (map->last_ip - map->first_ip + 1) * map->dsize));

@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))

@@ -26,6 +26,7 @@
 
 static LIST_HEAD(ip_set_type_list);		/* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);		/* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock);		/* protects the set refs */
 
 static struct ip_set **ip_set_list;		/* all individual sets */
 static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 static inline void
 __ip_set_get(ip_set_id_t index)
 {
-	atomic_inc(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	ip_set_list[index]->ref++;
+	write_unlock_bh(&ip_set_ref_lock);
 }
 
 static inline void
 __ip_set_put(ip_set_id_t index)
 {
-	atomic_dec(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	BUG_ON(ip_set_list[index]->ref == 0);
+	ip_set_list[index]->ref--;
+	write_unlock_bh(&ip_set_ref_lock);
 }
 
 /*
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  * Find set by name, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
- * The nfnl mutex must already be activated.
  */
 ip_set_id_t
 ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * reference count by 1. The caller shall not assume the index
 * to be valid, after calling this function.
 *
- * The nfnl mutex must already be activated.
 */
 void
 ip_set_put_byindex(ip_set_id_t index)
 {
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+	if (ip_set_list[index] != NULL)
 		__ip_set_put(index);
-	}
 }
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 * can't be destroyed. The set cannot be renamed due to
 * the referencing either.
 *
- * The nfnl mutex must already be activated.
 */
 const char *
 ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
 	const struct ip_set *set = ip_set_list[index];
 
 	BUG_ON(set == NULL);
-	BUG_ON(atomic_read(&set->ref) == 0);
+	BUG_ON(set->ref == 0);
 
 	/* Referenced, so it's safe */
 	return set->name;
@@ -515,10 +516,7 @@ void
 ip_set_nfnl_put(ip_set_id_t index)
 {
 	nfnl_lock();
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
-		__ip_set_put(index);
-	}
+	ip_set_put_byindex(index);
 	nfnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
 /*
 * Communication protocol with userspace over netlink.
 *
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
 */
 
 static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 		return -ENOMEM;
 	rwlock_init(&set->lock);
 	strlcpy(set->name, name, IPSET_MAXNAMELEN);
-	atomic_set(&set->ref, 0);
 	set->family = family;
 
 	/*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 
 	/*
 	 * Here, we have a valid, constructed set and we are protected
-	 * by nfnl_lock. Find the first free index in ip_set_list and
-	 * check clashing.
+	 * by the nfnl mutex. Find the first free index in ip_set_list
+	 * and check clashing.
 	 */
 	if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
 		/* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
 	       const struct nlattr * const attr[])
 {
 	ip_set_id_t i;
+	int ret = 0;
 
 	if (unlikely(protocol_failed(attr)))
 		return -IPSET_ERR_PROTOCOL;
 
-	/* References are protected by the nfnl mutex */
+	/* Commands are serialized and references are
+	 * protected by the ip_set_ref_lock.
+	 * External systems (i.e. xt_set) must call
+	 * ip_set_put|get_nfnl_* functions, that way we
+	 * can safely check references here.
+	 *
+	 * list:set timer can only decrement the reference
+	 * counter, so if it's already zero, we can proceed
+	 * without holding the lock.
+	 */
+	read_lock_bh(&ip_set_ref_lock);
 	if (!attr[IPSET_ATTR_SETNAME]) {
 		for (i = 0; i < ip_set_max; i++) {
-			if (ip_set_list[i] != NULL &&
-			    (atomic_read(&ip_set_list[i]->ref)))
-				return -IPSET_ERR_BUSY;
+			if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+				ret = IPSET_ERR_BUSY;
+				goto out;
+			}
 		}
+		read_unlock_bh(&ip_set_ref_lock);
 		for (i = 0; i < ip_set_max; i++) {
 			if (ip_set_list[i] != NULL)
 				ip_set_destroy_set(i);
 		}
 	} else {
 		i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
-		if (i == IPSET_INVALID_ID)
-			return -ENOENT;
-		else if (atomic_read(&ip_set_list[i]->ref))
-			return -IPSET_ERR_BUSY;
+		if (i == IPSET_INVALID_ID) {
+			ret = -ENOENT;
+			goto out;
+		} else if (ip_set_list[i]->ref) {
+			ret = -IPSET_ERR_BUSY;
+			goto out;
+		}
+		read_unlock_bh(&ip_set_ref_lock);
 
 		ip_set_destroy_set(i);
 	}
 	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }
 
 /* Flush sets */
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *set;
 	const char *name2;
 	ip_set_id_t i;
+	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
-	if (atomic_read(&set->ref) != 0)
-		return -IPSET_ERR_REFERENCED;
+
+	read_lock_bh(&ip_set_ref_lock);
+	if (set->ref != 0) {
+		ret = -IPSET_ERR_REFERENCED;
+		goto out;
+	}
 
 	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
 	for (i = 0; i < ip_set_max; i++) {
 		if (ip_set_list[i] != NULL &&
-		    STREQ(ip_set_list[i]->name, name2))
-			return -IPSET_ERR_EXIST_SETNAME2;
+		    STREQ(ip_set_list[i]->name, name2)) {
+			ret = -IPSET_ERR_EXIST_SETNAME2;
+			goto out;
+		}
 	}
 	strncpy(set->name, name2, IPSET_MAXNAMELEN);
 
-	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }
 
 /* Swap two sets so that name/index points to the other.
 * References and set names are also swapped.
 *
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
 * do not hold the mutex but the pointer settings are atomic
 * so the ip_set_list always contains valid pointers to the sets.
 */
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *from, *to;
 	ip_set_id_t from_id, to_id;
 	char from_name[IPSET_MAXNAMELEN];
-	u32 from_ref;
 
 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	      from->type->family == to->type->family))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	/* No magic here: ref munging protected by the nfnl_lock */
 	strncpy(from_name, from->name, IPSET_MAXNAMELEN);
-	from_ref = atomic_read(&from->ref);
-
 	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
-	atomic_set(&from->ref, atomic_read(&to->ref));
 	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
-	atomic_set(&to->ref, from_ref);
 
+	write_lock_bh(&ip_set_ref_lock);
+	swap(from->ref, to->ref);
 	ip_set_list[from_id] = to;
 	ip_set_list[to_id] = from;
+	write_unlock_bh(&ip_set_ref_lock);
 
 	return 0;
 }
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
 {
 	if (cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
-		__ip_set_put((ip_set_id_t) cb->args[1]);
+		ip_set_put_byindex((ip_set_id_t) cb->args[1]);
 	}
 	return 0;
 }
@@ -1068,7 +1091,7 @@ release_refcount:
 	/* If there was an error or set is done, release set */
 	if (ret || !cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[index]->name);
-		__ip_set_put(index);
+		ip_set_put_byindex(index);
 	}
 
 	/* If we dump all sets, continue with dumping last ones */

@@ -43,14 +43,19 @@ struct list_set {
 static inline struct set_elem *
 list_set_elem(const struct list_set *map, u32 id)
 {
-	return (struct set_elem *)((char *)map->members + id * map->dsize);
+	return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+	return (struct set_telem *)((void *)map->members + id * map->dsize);
 }
 
 static inline bool
 list_set_timeout(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);
 
 	return ip_set_timeout_test(elem->timeout);
 }
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
 static inline bool
 list_set_expired(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);
 
 	return ip_set_timeout_expired(elem->timeout);
 }
 
-static inline int
-list_set_exist(const struct set_telem *elem)
-{
-	return elem->id != IPSET_INVALID_ID &&
-	       !ip_set_timeout_expired(elem->timeout);
-}
-
 /* Set list without and with timeout */
 
 static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
 	struct set_telem *e;
 
 	for (; i < map->size; i++) {
-		e = (struct set_telem *)list_set_elem(map, i);
+		e = list_set_telem(map, i);
 		swap(e->id, id);
+		swap(e->timeout, timeout);
 		if (e->id == IPSET_INVALID_ID)
 			break;
-		swap(e->timeout, timeout);
 	}
 }
 
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 		/* Last element replaced: e.g. add new,before,last */
 		ip_set_put_byindex(e->id);
 	if (with_timeout(map->timeout))
-		list_elem_tadd(map, i, id, timeout);
+		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
 	else
 		list_elem_add(map, i, id);
 
@@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 }
 
 static int
-list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+list_set_del(struct list_set *map, u32 i)
 {
 	struct set_elem *a = list_set_elem(map, i), *b;
 
-	ip_set_put_byindex(id);
+	ip_set_put_byindex(a->id);
 
 	for (; i < map->size - 1; i++) {
 		b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
 			    (before == 0 ||
 			     (before > 0 &&
 			      next_id_eq(map, i, refid))))
-				ret = list_set_del(map, id, i);
+				ret = list_set_del(map, i);
 			else if (before < 0 &&
 				 elem->id == refid &&
 				 next_id_eq(map, i, id))
-				ret = list_set_del(map, id, i + 1);
+				ret = list_set_del(map, i + 1);
 		}
 		break;
 	default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
 	if (with_timeout(map->timeout))
 		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->size * map->dsize));
 	ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
 	struct set_telem *e;
 	u32 i;
 
-	/* We run parallel with other readers (test element)
-	 * but adding/deleting new entries is locked out */
-	read_lock_bh(&set->lock);
-	for (i = map->size - 1; i >= 0; i--) {
-		e = (struct set_telem *) list_set_elem(map, i);
-		if (e->id != IPSET_INVALID_ID &&
-		    list_set_expired(map, i))
-			list_set_del(map, e->id, i);
+	write_lock_bh(&set->lock);
+	for (i = 0; i < map->size; i++) {
+		e = list_set_telem(map, i);
+		if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+			list_set_del(map, i);
 	}
-	read_unlock_bh(&set->lock);
+	write_unlock_bh(&set->lock);
 
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
 	add_timer(&map->gc);

@@ -3120,7 +3120,7 @@ nla_put_failure:
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 				   struct netlink_callback *cb)
 {
-	struct net *net = skb_net(skb);
+	struct net *net = skb_sknet(skb);
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&__ip_vs_mutex);

@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
 		CHECK_BOUND(bs, 2);
 		count = *bs->cur++;
 		count <<= 8;
-		count = *bs->cur++;
+		count += *bs->cur++;
 		break;
 	case SEMI:
 		BYTE_ALIGN(bs);
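The one-character fix above is the classic dropped high byte when assembling a big-endian 16-bit count: "=" overwrote the shifted byte that "+=" should have combined with, so any SEQUENCE OF longer than 255 elements was mis-sized. An equivalent standalone helper, for reference only:

	#include <stdint.h>

	/* Combine two consecutive bytes into a big-endian u16. Writing
	 * "count = lo" after "count = hi; count <<= 8;" discards hi,
	 * which is exactly the bug decode_seqof() had. */
	static inline uint16_t be16_from_bytes(const uint8_t *p)
	{
		return (uint16_t)((p[0] << 8) | p[1]);
	}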

@@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
 		memset(&fl2, 0, sizeof(fl2));
 		fl2.daddr = dst->ip;
-		if (!afinfo->route((struct dst_entry **)&rt1,
-				   flowi4_to_flowi(&fl1))) {
-			if (!afinfo->route((struct dst_entry **)&rt2,
-					   flowi4_to_flowi(&fl2))) {
+		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+				   flowi4_to_flowi(&fl1), false)) {
+			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+					   flowi4_to_flowi(&fl2), false)) {
 				if (rt1->rt_gateway == rt2->rt_gateway &&
 				    rt1->dst.dev  == rt2->dst.dev)
 					ret = 1;
@@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
 		memset(&fl2, 0, sizeof(fl2));
 		ipv6_addr_copy(&fl2.daddr, &dst->in6);
-		if (!afinfo->route((struct dst_entry **)&rt1,
-				   flowi6_to_flowi(&fl1))) {
-			if (!afinfo->route((struct dst_entry **)&rt2,
-					   flowi6_to_flowi(&fl2))) {
+		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+				   flowi6_to_flowi(&fl1), false)) {
+			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+					   flowi6_to_flowi(&fl2), false)) {
 				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
 					    sizeof(rt1->rt6i_gateway)) &&
 				    rt1->dst.dev == rt2->dst.dev)

@@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
 	rcu_read_lock();
 	ai = nf_get_afinfo(family);
 	if (ai != NULL)
-		ai->route((struct dst_entry **)&rt, &fl);
+		ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
 	rcu_read_unlock();
 
 	if (rt != NULL) {

@@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");
 
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
+			    const struct in6_addr *addr)
 {
+	const struct nf_afinfo *afinfo;
+	struct flowi6 flow;
+	struct rt6_info *rt;
 	u32 ret;
+	int route_err;
 
-	if (!rt)
+	memset(&flow, 0, sizeof(flow));
+	ipv6_addr_copy(&flow.daddr, addr);
+	if (dev)
+		flow.flowi6_oif = dev->ifindex;
+
+	rcu_read_lock();
+
+	afinfo = nf_get_afinfo(NFPROTO_IPV6);
+	if (afinfo != NULL)
+		route_err = afinfo->route(net, (struct dst_entry **)&rt,
+					flowi6_to_flowi(&flow), !!dev);
+	else
+		route_err = 1;
+
+	rcu_read_unlock();
+
+	if (route_err)
 		return XT_ADDRTYPE_UNREACHABLE;
 
 	if (rt->rt6i_flags & RTF_REJECT)
@@ -48,6 +69,9 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 		ret |= XT_ADDRTYPE_LOCAL;
 	if (rt->rt6i_flags & RTF_ANYCAST)
 		ret |= XT_ADDRTYPE_ANYCAST;
+
+	dst_release(&rt->dst);
+
 	return ret;
 }
 
@@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev,
 		return false;
 
 	if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
-	     XT_ADDRTYPE_UNREACHABLE) & mask) {
-		struct rt6_info *rt;
-		u32 type;
-		int ifindex = dev ? dev->ifindex : 0;
-
-		rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
-
-		type = xt_addrtype_rt6_to_type(rt);
-
-		dst_release(&rt->dst);
-		return !!(mask & type);
-	}
+	     XT_ADDRTYPE_UNREACHABLE) & mask)
+		return !!(mask & match_lookup_rt6(net, dev, addr));
+
 	return true;
 }

@@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
 		return info->match_flags & XT_CONNTRACK_STATE;
 	if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
 	    (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
-	    !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
+	    !(info->invert_flags & XT_CONNTRACK_DIRECTION))
 		return false;
 
 	if (info->match_flags & XT_CONNTRACK_ORIGSRC)