Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Don't ignore user initiated wireless regulatory settings on cards
    with custom regulatory domains, from Arik Nemtsov.

 2) Fix length check of bluetooth information responses, from Jaganath
    Kanakkassery.

 3) Fix misuse of PTR_ERR in btusb, from Adam Lee.

 4) Handle rfkill properly while iwlwifi devices are offline, from
    Emmanuel Grumbach.

 5) Fix r815x devices DMA'ing to stack buffers, from Hayes Wang.

 6) Kernel info leak in ATM packet scheduler, from Dan Carpenter.

 7) 8139cp doesn't check for DMA mapping errors, from Neil Horman.

 8) Fix bridge multicast code to not snoop when no querier exists,
    otherwise multicast traffic is lost.  From Linus Lüssing.

 9) Avoid soft lockups in fib6_run_gc(), from Michal Kubecek.

10) Fix races in automatic address assignment on IPv6, which can result
    in incorrect lifetime assignments.  From Jiri Benc.

11) Cure build breakage when CONFIG_NET_LL_RX_POLL is not set and rename
    it to CONFIG_NET_RX_BUSY_POLL, eliminating the last reference to the
    original naming of this feature.  From Cong Wang.

12) Fix crash in TIPC when server socket creation fails, from Ying Xue.

13) macvlan_changelink() silently succeeds when it shouldn't, from
    Michael S. Tsirkin.

14) HTB packet scheduler can crash due to sign extension, fix from
    Stephen Hemminger.

15) With the cable unplugged, r8169 prints out a message every 10
    seconds; make it netif_dbg() instead of netif_warn().  From Peter
    Wu.

16) Fix memory leak in rtm_to_ifaddr(), from Daniel Borkmann.

17) sis900 gets spurious TX queue timeouts due to mismanagement of link
    carrier state, from Denis Kirjanov.

18) Validate somaxconn sysctl to make sure it fits inside a u16.
    From Roman Gushchin.

19) Fix MAC address filtering on qlcnic, from Shahed Shaikh.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (68 commits)
  qlcnic: Fix for flash update failure on 83xx adapter
  qlcnic: Fix link speed and duplex display for 83xx adapter
  qlcnic: Fix link speed display for 82xx adapter
  qlcnic: Fix external loopback test.
  qlcnic: Removed adapter series name from warning messages.
  qlcnic: Free up memory in error path.
  qlcnic: Fix ingress MAC learning
  qlcnic: Fix MAC address filter issue on 82xx adapter
  net: ethernet: davinci_emac: drop IRQF_DISABLED
  netlabel: use domain based selectors when address based selectors are not available
  net: check net.core.somaxconn sysctl values
  sis900: Fix the tx queue timeout issue
  net: rtm_to_ifaddr: free ifa if ifa_cacheinfo processing fails
  r8169: remove "PHY reset until link up" log spam
  net: ethernet: cpsw: drop IRQF_DISABLED
  htb: fix sign extension bug
  macvlan: handle set_promiscuity failures
  macvlan: better mode validation
  tipc: fix oops when creating server socket fails
  net: rename CONFIG_NET_LL_RX_POLL to CONFIG_NET_RX_BUSY_POLL
  ...
Committed by Linus Torvalds, 2013-08-03 15:00:23 -07:00
commit 72a67a94bc
92 changed files with 995 additions and 678 deletions


@ -52,7 +52,7 @@ Default: 64
busy_read
----------------
Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for packets on the device queue.
This sets the default value of the SO_BUSY_POLL socket option.
Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
@ -63,7 +63,7 @@ Default: 0 (off)
busy_poll
----------------
Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
Low latency busy poll timeout for poll and select. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for events.
Recommended value depends on the number of sockets you poll on.
For several sockets 50, for several hundreds 100.
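
For context, the sysctls above only set system-wide defaults; a single socket can override them with the SO_BUSY_POLL option mentioned in the text. A minimal userspace sketch, assuming the usual asm-generic socket numbering for the fallback define; the 50 microsecond figure is only an illustration, not a tuning recommendation:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46	/* assumption: value from asm-generic/socket.h; older libc headers may lack it */
#endif

/* Busy-poll reads on this socket for up to ~50 us before sleeping.
 * Needs a kernel built with CONFIG_NET_RX_BUSY_POLL; raising the value
 * above the busy_read sysctl may require CAP_NET_ADMIN. */
static int enable_busy_poll(int fd)
{
	unsigned int usecs = 50;

	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs)) < 0) {
		perror("setsockopt(SO_BUSY_POLL)");
		return -1;
	}
	return 0;
}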


@ -1406,7 +1406,7 @@ ATHEROS ATH6KL WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ath6kl
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
T: git git://github.com/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath6kl/
@ -6726,6 +6726,14 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/tuners/qt1010*
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
L: ath10k@lists.infradead.org
W: http://wireless.kernel.org/en/users/Drivers/ath10k
T: git git://github.com/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath10k/
QUALCOMM HEXAGON ARCHITECTURE
M: Richard Kuo <rkuo@codeaurora.org>
L: linux-hexagon@vger.kernel.org
@ -8270,7 +8278,7 @@ S: Maintained
F: sound/soc/codecs/twl4030*
TI WILINK WIRELESS DRIVERS
M: Luciano Coelho <coelho@ti.com>
M: Luciano Coelho <luca@coelho.fi>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/wl12xx
W: http://wireless.kernel.org/en/users/Drivers/wl1251


@ -91,6 +91,10 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe04e) },
{ USB_DEVICE(0x0489, 0xe056) },
{ USB_DEVICE(0x0489, 0xe04d) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x0cf3, 0x3121) },
{ USB_DEVICE(0x0cf3, 0xe003) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@ -128,6 +132,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@ -193,24 +201,44 @@ error:
static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
{
int pipe = 0;
int ret, pipe = 0;
char *buf;
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0);
return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
state, 0x01, USB_CTRL_SET_TIMEOUT);
ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
*state = *buf;
kfree(buf);
return ret;
}
static int ath3k_get_version(struct usb_device *udev,
struct ath3k_version *version)
{
int pipe = 0;
int ret, pipe = 0;
struct ath3k_version *buf;
const int size = sizeof(*buf);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0);
return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
sizeof(struct ath3k_version),
USB_CTRL_SET_TIMEOUT);
ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
buf, size, USB_CTRL_SET_TIMEOUT);
memcpy(version, buf, size);
kfree(buf);
return ret;
}
static int ath3k_load_fwfile(struct usb_device *udev,
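
The ath3k hunks above apply the same rule as the r815x fix called out in the summary: buffers passed to usb_control_msg() are used for DMA and therefore must be heap-allocated, never taken from the caller's stack. A stripped-down sketch of the pattern, using an illustrative helper and request name rather than the driver's own constants:

#include <linux/slab.h>
#include <linux/usb.h>

/* Read one vendor-specific byte over USB endpoint 0 via a kmalloc'd
 * bounce buffer, then copy the result out for the caller. */
static int example_read_vendor_byte(struct usb_device *udev, u8 request,
				    unsigned char *out)
{
	unsigned char *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* DMA-able, unlike a stack variable */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
			      buf, 1, USB_CTRL_SET_TIMEOUT);
	if (ret >= 0)
		*out = *buf;

	kfree(buf);
	return ret;
}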


@ -154,6 +154,10 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@ -1095,7 +1099,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
if (IS_ERR(skb)) {
BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
hdev->name, cmd->opcode, PTR_ERR(skb));
return -PTR_ERR(skb);
return PTR_ERR(skb);
}
/* It ensures that the returned event matches the event data read from
@ -1147,7 +1151,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s sending initial HCI reset command failed (%ld)",
hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb);
return PTR_ERR(skb);
}
kfree_skb(skb);
@ -1161,7 +1165,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s reading Intel fw version command failed (%ld)",
hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb);
return PTR_ERR(skb);
}
if (skb->len != sizeof(*ver)) {
@ -1219,7 +1223,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
release_firmware(fw);
return -PTR_ERR(skb);
return PTR_ERR(skb);
}
if (skb->data[0]) {
@ -1276,7 +1280,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb);
return PTR_ERR(skb);
}
kfree_skb(skb);
@ -1292,7 +1296,7 @@ exit_mfg_disable:
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb);
return PTR_ERR(skb);
}
kfree_skb(skb);
@ -1310,7 +1314,7 @@ exit_mfg_deactivate:
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb);
return PTR_ERR(skb);
}
kfree_skb(skb);
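
A note on the recurring one-liner above: PTR_ERR() applied to an error pointer already yields a negative errno, so the old return -PTR_ERR(skb) handed callers a positive number they would not treat as a failure. A toy illustration of the convention, with placeholder function names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc(void)
{
	return ERR_PTR(-ENOMEM);	/* failure encoded as an error pointer */
}

static int example_caller(void)
{
	struct sk_buff *skb = example_alloc();

	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* already negative: -ENOMEM */
					/* "-PTR_ERR(skb)" would return +ENOMEM,
					 * which looks like success to callers */
	return 0;
}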


@ -486,7 +486,7 @@ struct bnx2x_fastpath {
struct napi_struct napi;
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
@ -498,7 +498,7 @@ struct bnx2x_fastpath {
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */
spinlock_t lock;
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* CONFIG_NET_RX_BUSY_POLL */
union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
@ -572,7 +572,7 @@ struct bnx2x_fastpath {
#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{
spin_lock_init(&fp->lock);
@ -680,7 +680,7 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
return false;
}
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* CONFIG_NET_RX_BUSY_POLL */
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500


@ -3117,7 +3117,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
return work_done;
}
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi)
{


@ -12026,7 +12026,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
};


@ -54,7 +54,7 @@
#include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
#define LL_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
@ -366,7 +366,7 @@ struct ixgbe_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
@ -377,12 +377,12 @@ struct ixgbe_q_vector {
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* CONFIG_NET_RX_BUSY_POLL */
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
@ -462,7 +462,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
return q_vector->state & IXGBE_QV_USER_PEND;
}
#else /* CONFIG_NET_LL_RX_POLL */
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
}
@ -491,7 +491,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
{
return false;
}
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON


@ -1998,7 +1998,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
return total_rx_packets;
}
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi)
{
@ -2030,7 +2030,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
return found;
}
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
* ixgbe_configure_msix - Configure MSI-X hardware
@ -7227,7 +7227,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE


@ -223,7 +223,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
(priv->tx_ring_num * 2) +
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
(priv->rx_ring_num * 5);
#else
(priv->rx_ring_num * 2);
@ -276,7 +276,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes;
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
data[index++] = priv->rx_ring[i].yields;
data[index++] = priv->rx_ring[i].misses;
data[index++] = priv->rx_ring[i].cleaned;
@ -344,7 +344,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_napi_yield", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,


@ -68,7 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
@ -94,7 +94,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
return done;
}
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL
@ -2140,7 +2140,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
};


@ -845,16 +845,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_CMD_NATIVE);
if (!err && dev->caps.function != slave) {
/* if config MAC in DB use it */
if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
else {
/* set slave default_mac address */
MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
def_mac += slave << 8;
priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
}
def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
/* get port type - currently only eth is enabled */


@ -371,7 +371,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
if (!enable_64b_cqe_eqe) {
if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
if (dev_cap->flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");


@ -292,7 +292,7 @@ struct mlx4_en_rx_ring {
void *rx_info;
unsigned long bytes;
unsigned long packets;
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned long yields;
unsigned long misses;
unsigned long cleaned;
@ -318,7 +318,7 @@ struct mlx4_en_cq {
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE 0
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
@ -329,7 +329,7 @@ struct mlx4_en_cq {
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
struct mlx4_en_port_profile {
@ -580,7 +580,7 @@ struct mlx4_mac_entry {
struct rcu_head rcu;
};
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
spin_lock_init(&cq->poll_lock);
@ -687,7 +687,7 @@ static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
{
return false;
}
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* CONFIG_NET_RX_BUSY_POLL */
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)


@ -1400,8 +1400,8 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) < (high)) && ((addr) >= (low)))
#define QLCRD32(adapter, off) \
(adapter->ahw->hw_ops->read_reg)(adapter, off)
#define QLCRD32(adapter, off, err) \
(adapter->ahw->hw_ops->read_reg)(adapter, off, err)
#define QLCWR32(adapter, off, val) \
adapter->ahw->hw_ops->write_reg(adapter, off, val)
@ -1604,7 +1604,7 @@ struct qlcnic_nic_template {
struct qlcnic_hardware_ops {
void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
int (*read_reg) (struct qlcnic_adapter *, ulong);
int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
@ -1662,12 +1662,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
}
static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
ulong off)
{
return adapter->ahw->hw_ops->read_reg(adapter, off);
}
static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
ulong off, u32 data)
{
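
Most of the qlcnic churn that follows stems from one interface change: read_reg()/QLCRD32() used to return either register data or -EIO through the same int, which is ambiguous because a register can legitimately read back as that same bit pattern. The new signature moves the error into a separate out-parameter. A self-contained sketch of the two shapes; the function names here are placeholders, not the driver's:

#include <errno.h>
#include <stdint.h>

/* Stand-in for a raw hardware read. */
static uint32_t hw_read(unsigned long off)
{
	(void)off;
	return (uint32_t)-EIO;	/* a perfectly legal register value */
}

/* Old shape: data and error share one return, so the value above is
 * indistinguishable from a failed read. */
static int read_reg_old(unsigned long off)
{
	return (int)hw_read(off);
}

/* New shape (what the patches switch QLCRD32 and read_reg to): the return
 * carries data, *err carries 0 or -EIO. */
static uint32_t read_reg_new(unsigned long off, int *err)
{
	*err = 0;	/* the real driver sets -EIO when the window setup fails */
	return hw_read(off);
}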


@ -228,17 +228,17 @@ static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
return 0;
}
int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr)
int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
int *err)
{
int ret;
struct qlcnic_hardware_context *ahw = adapter->ahw;
ret = __qlcnic_set_win_base(adapter, (u32) addr);
if (!ret) {
*err = __qlcnic_set_win_base(adapter, (u32) addr);
if (!*err) {
return QLCRDX(ahw, QLCNIC_WILDCARD);
} else {
dev_err(&adapter->pdev->dev,
"%s failed, addr = 0x%x\n", __func__, (int)addr);
"%s failed, addr = 0x%lx\n", __func__, addr);
return -EIO;
}
}
@ -561,7 +561,7 @@ void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
loff_t offset, size_t size)
{
int ret;
int ret = 0;
u32 data;
if (qlcnic_api_lock(adapter)) {
@ -571,7 +571,7 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
return;
}
ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset);
data = QLCRD32(adapter, (u32) offset, &ret);
qlcnic_api_unlock(adapter);
if (ret == -EIO) {
@ -580,7 +580,6 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
__func__, (u32)offset);
return;
}
data = ret;
memcpy(buf, &data, size);
}
@ -2075,18 +2074,25 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
u32 data[])
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u8 link_status, duplex;
/* link speed */
link_status = LSB(data[3]) & 1;
adapter->ahw->link_speed = MSW(data[2]);
adapter->ahw->link_autoneg = MSB(MSW(data[3]));
adapter->ahw->module_type = MSB(LSW(data[3]));
duplex = LSB(MSW(data[3]));
if (duplex)
adapter->ahw->link_duplex = DUPLEX_FULL;
else
adapter->ahw->link_duplex = DUPLEX_HALF;
adapter->ahw->has_link_events = 1;
if (link_status) {
ahw->link_speed = MSW(data[2]);
duplex = LSB(MSW(data[3]));
if (duplex)
ahw->link_duplex = DUPLEX_FULL;
else
ahw->link_duplex = DUPLEX_HALF;
} else {
ahw->link_speed = SPEED_UNKNOWN;
ahw->link_duplex = DUPLEX_UNKNOWN;
}
ahw->link_autoneg = MSB(MSW(data[3]));
ahw->module_type = MSB(LSW(data[3]));
ahw->has_link_events = 1;
qlcnic_advert_link_change(adapter, link_status);
}
@ -2384,9 +2390,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
u32 flash_addr, u8 *p_data,
int count)
{
int i, ret;
u32 word, range, flash_offset, addr = flash_addr;
u32 word, range, flash_offset, addr = flash_addr, ret;
ulong indirect_add, direct_window;
int i, err = 0;
flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
if (addr & 0x3) {
@ -2404,10 +2410,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
/* Multi sector read */
for (i = 0; i < count; i++) {
indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = qlcnic_83xx_rd_reg_indirect(adapter,
indirect_add);
if (ret == -EIO)
return -EIO;
ret = QLCRD32(adapter, indirect_add, &err);
if (err == -EIO)
return err;
word = ret;
*(u32 *)p_data = word;
@ -2428,10 +2433,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
/* Single sector read */
for (i = 0; i < count; i++) {
indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = qlcnic_83xx_rd_reg_indirect(adapter,
indirect_add);
if (ret == -EIO)
return -EIO;
ret = QLCRD32(adapter, indirect_add, &err);
if (err == -EIO)
return err;
word = ret;
*(u32 *)p_data = word;
@ -2447,10 +2451,13 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
{
u32 status;
int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
int err = 0;
do {
status = qlcnic_83xx_rd_reg_indirect(adapter,
QLC_83XX_FLASH_STATUS);
status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
if (err == -EIO)
return err;
if ((status & QLC_83XX_FLASH_STATUS_READY) ==
QLC_83XX_FLASH_STATUS_READY)
break;
@ -2502,7 +2509,8 @@ int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
{
int ret, mfg_id;
int ret, err = 0;
u32 mfg_id;
if (qlcnic_83xx_lock_flash(adapter))
return -EIO;
@ -2517,9 +2525,11 @@ int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
return -EIO;
}
mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
if (mfg_id == -EIO)
return -EIO;
mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
if (err == -EIO) {
qlcnic_83xx_unlock_flash(adapter);
return err;
}
adapter->flash_mfg_id = (mfg_id & 0xFF);
qlcnic_83xx_unlock_flash(adapter);
@ -2636,7 +2646,7 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
u32 *p_data, int count)
{
u32 temp;
int ret = -EIO;
int ret = -EIO, err = 0;
if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
(count > QLC_83XX_FLASH_WRITE_MAX)) {
@ -2645,8 +2655,10 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
return -EIO;
}
temp = qlcnic_83xx_rd_reg_indirect(adapter,
QLC_83XX_FLASH_SPI_CONTROL);
temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
if (err == -EIO)
return err;
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
(temp | QLC_83XX_FLASH_SPI_CTRL));
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
@ -2695,13 +2707,18 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
return -EIO;
}
ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS);
ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
if (err == -EIO)
return err;
if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
__func__, __LINE__);
/* Operation failed, clear error bit */
temp = qlcnic_83xx_rd_reg_indirect(adapter,
QLC_83XX_FLASH_SPI_CONTROL);
temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
if (err == -EIO)
return err;
qlcnic_83xx_wrt_reg_indirect(adapter,
QLC_83XX_FLASH_SPI_CONTROL,
(temp | QLC_83XX_FLASH_SPI_CTRL));
@ -2823,6 +2840,7 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
{
int i, j, ret = 0;
u32 temp;
int err = 0;
/* Check alignment */
if (addr & 0xF)
@ -2855,8 +2873,12 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
QLCNIC_TA_WRITE_START);
for (j = 0; j < MAX_CTL_CHECK; j++) {
temp = qlcnic_83xx_rd_reg_indirect(adapter,
QLCNIC_MS_CTRL);
temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
if (err == -EIO) {
mutex_unlock(&adapter->ahw->mem_lock);
return err;
}
if ((temp & TA_CTL_BUSY) == 0)
break;
}
@ -2878,9 +2900,9 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
u8 *p_data, int count)
{
int i, ret;
u32 word, addr = flash_addr;
u32 word, addr = flash_addr, ret;
ulong indirect_addr;
int i, err = 0;
if (qlcnic_83xx_lock_flash(adapter) != 0)
return -EIO;
@ -2900,10 +2922,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
}
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = qlcnic_83xx_rd_reg_indirect(adapter,
indirect_addr);
if (ret == -EIO)
return -EIO;
ret = QLCRD32(adapter, indirect_addr, &err);
if (err == -EIO)
return err;
word = ret;
*(u32 *)p_data = word;
p_data = p_data + 4;
@ -3369,7 +3391,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
{
int ret;
int ret, err = 0;
u32 temp;
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
QLC_83XX_FLASH_OEM_READ_SIG);
@ -3379,8 +3402,11 @@ static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
if (ret)
return -EIO;
ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
return ret & 0xFF;
temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
if (err == -EIO)
return err;
return temp & 0xFF;
}
int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)


@ -508,7 +508,7 @@ void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong);
int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);


@ -1303,8 +1303,11 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
{
int i, j;
u32 val = 0, val1 = 0, reg = 0;
int err = 0;
val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG);
val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
if (err == -EIO)
return;
dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
for (j = 0; j < 2; j++) {
@ -1318,7 +1321,9 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_THRESHOLD;
}
for (i = 0; i < 8; i++) {
val = QLCRD32(adapter, reg + (i * 0x4));
val = QLCRD32(adapter, reg + (i * 0x4), &err);
if (err == -EIO)
return;
dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
@ -1335,8 +1340,10 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_TC_MC_REG;
}
for (i = 0; i < 4; i++) {
val = QLCRD32(adapter, reg + (i * 0x4));
dev_info(&adapter->pdev->dev, "0x%x ", val);
val = QLCRD32(adapter, reg + (i * 0x4), &err);
if (err == -EIO)
return;
dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
}
@ -1352,17 +1359,25 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_TC_STATS;
}
for (i = 7; i >= 0; i--) {
val = QLCRD32(adapter, reg);
val = QLCRD32(adapter, reg, &err);
if (err == -EIO)
return;
val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
QLCWR32(adapter, reg, (val | (i << 29)));
val = QLCRD32(adapter, reg);
val = QLCRD32(adapter, reg, &err);
if (err == -EIO)
return;
dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
}
val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD);
val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD);
val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
if (err == -EIO)
return;
val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
if (err == -EIO)
return;
dev_info(&adapter->pdev->dev,
"IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
val, val1);
@ -1425,7 +1440,7 @@ static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
{
u32 heartbeat, peg_status;
int retries, ret = -EIO;
int retries, ret = -EIO, err = 0;
retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
@ -1453,11 +1468,11 @@ static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
"PEG_NET_4_PC: 0x%x\n", peg_status,
QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4));
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&p_dev->pdev->dev,
@ -1501,18 +1516,22 @@ int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
int duration, u32 mask, u32 status)
{
int timeout_error, err = 0;
u32 value;
int timeout_error;
u8 retries;
value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
value = QLCRD32(p_dev, addr, &err);
if (err == -EIO)
return err;
retries = duration / 10;
do {
if ((value & mask) != status) {
timeout_error = 1;
msleep(duration / 10);
value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
value = QLCRD32(p_dev, addr, &err);
if (err == -EIO)
return err;
} else {
timeout_error = 0;
break;
@ -1606,9 +1625,12 @@ int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
u32 raddr, u32 waddr)
{
int value;
int err = 0;
u32 value;
value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
value = QLCRD32(p_dev, raddr, &err);
if (err == -EIO)
return;
qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
}
@ -1617,12 +1639,16 @@ static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
u32 raddr, u32 waddr,
struct qlc_83xx_rmw *p_rmw_hdr)
{
int value;
int err = 0;
u32 value;
if (p_rmw_hdr->index_a)
if (p_rmw_hdr->index_a) {
value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
else
value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
} else {
value = QLCRD32(p_dev, raddr, &err);
if (err == -EIO)
return;
}
value &= p_rmw_hdr->mask;
value <<= p_rmw_hdr->shl;
@ -1675,7 +1701,7 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
long delay;
struct qlc_83xx_entry *entry;
struct qlc_83xx_poll *poll;
int i;
int i, err = 0;
unsigned long arg1, arg2;
poll = (struct qlc_83xx_poll *)((char *)p_hdr +
@ -1699,10 +1725,12 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
arg1, delay,
poll->mask,
poll->status)){
qlcnic_83xx_rd_reg_indirect(p_dev,
arg1);
qlcnic_83xx_rd_reg_indirect(p_dev,
arg2);
QLCRD32(p_dev, arg1, &err);
if (err == -EIO)
return;
QLCRD32(p_dev, arg2, &err);
if (err == -EIO)
return;
}
}
}
@ -1768,7 +1796,7 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
struct qlc_83xx_entry_hdr *p_hdr)
{
long delay;
int index, i, j;
int index, i, j, err;
struct qlc_83xx_quad_entry *entry;
struct qlc_83xx_poll *poll;
unsigned long addr;
@ -1788,7 +1816,10 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
poll->mask, poll->status)){
index = p_dev->ahw->reset.array_index;
addr = entry->dr_addr;
j = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
j = QLCRD32(p_dev, addr, &err);
if (err == -EIO)
return;
p_dev->ahw->reset.array[index++] = j;
if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)


@ -104,7 +104,7 @@ static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
u32 rsp;
int timeout = 0;
int timeout = 0, err = 0;
do {
/* give atleast 1ms for firmware to respond */
@ -113,7 +113,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
return QLCNIC_CDRP_RSP_TIMEOUT;
rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
} while (!QLCNIC_CDRP_IS_RSP(rsp));
return rsp;
@ -122,7 +122,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
int i;
int i, err = 0;
u32 rsp;
u32 signature;
struct pci_dev *pdev = adapter->pdev;
@ -148,7 +148,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
dev_err(&pdev->dev, "card response timeout.\n");
cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
switch (cmd->rsp.arg[0]) {
case QLCNIC_RCODE_INVALID_ARGS:
fmt = "CDRP invalid args: [%d]\n";
@ -175,7 +175,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
for (i = 1; i < cmd->rsp.num; i++)
cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));
cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
/* Release semaphore */
qlcnic_api_unlock(adapter);
@ -210,10 +210,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
if (err) {
dev_info(&adapter->pdev->dev,
"Failed to set driver version in firmware\n");
return -EIO;
err = -EIO;
}
return 0;
qlcnic_free_mbx_args(&cmd);
return err;
}
int


@ -150,6 +150,7 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Link_Test_on_offline",
"Interrupt_Test_offline",
"Internal_Loopback_offline",
"External_Loopback_offline",
"EEPROM_Test_offline"
};
@ -266,7 +267,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
int check_sfp_module = 0;
int check_sfp_module = 0, err = 0;
u16 pcifn = ahw->pci_func;
/* read which mode */
@ -289,7 +290,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
u32 val = 0;
val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
@ -300,9 +301,13 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
}
if (netif_running(adapter->netdev) && ahw->has_link_events) {
reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
speed = P3P_LINK_SPEED_VAL(pcifn, reg);
ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
if (ahw->linkup) {
reg = QLCRD32(adapter,
P3P_LINK_SPEED_REG(pcifn), &err);
speed = P3P_LINK_SPEED_VAL(pcifn, reg);
ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
}
ethtool_cmd_speed_set(ecmd, ahw->link_speed);
ecmd->autoneg = ahw->link_autoneg;
ecmd->duplex = ahw->link_duplex;
@ -463,13 +468,14 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
u32 *regs_buff)
{
int i, j = 0;
int i, j = 0, err = 0;
for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
j = 0;
while (ext_diag_registers[j] != -1)
regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
&err);
return i;
}
@ -519,13 +525,16 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
static u32 qlcnic_test_link(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err = 0;
u32 val;
if (qlcnic_83xx_check(adapter)) {
val = qlcnic_83xx_test_link(adapter);
return (val & 1) ? 0 : 1;
}
val = QLCRD32(adapter, CRB_XG_STATE_P3P);
val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
if (err == -EIO)
return err;
val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
}
@ -658,6 +667,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
int err = 0;
__u32 val;
if (qlcnic_83xx_check(adapter)) {
@ -668,9 +678,13 @@ qlcnic_get_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
if (err == -EIO)
return;
pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
if (err == -EIO)
return;
switch (port) {
case 0:
pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
@ -690,7 +704,9 @@ qlcnic_get_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
if (err == -EIO)
return;
if (port == 0)
pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
else
@ -707,6 +723,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
int err = 0;
__u32 val;
if (qlcnic_83xx_check(adapter))
@ -717,7 +734,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
if (err == -EIO)
return err;
if (pause->rx_pause)
qlcnic_gb_rx_flowctl(val);
@ -728,7 +747,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
val);
QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
/* set autoneg */
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
if (err == -EIO)
return err;
switch (port) {
case 0:
if (pause->tx_pause)
@ -764,7 +785,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return -EIO;
val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
if (err == -EIO)
return err;
if (port == 0) {
if (pause->tx_pause)
qlcnic_xg_unset_xg0_mask(val);
@ -788,11 +811,14 @@ static int qlcnic_reg_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 data_read;
int err = 0;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_reg_test(adapter);
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
if (err == -EIO)
return err;
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
@ -1026,8 +1052,15 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
data[4] = qlcnic_eeprom_test(dev);
if (data[4])
if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
if (data[4])
eth_test->flags |= ETH_TEST_FL_FAILED;
eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
}
data[5] = qlcnic_eeprom_test(dev);
if (data[5])
eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
@ -1257,17 +1290,20 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
int err = 0;
if (qlcnic_83xx_check(adapter))
return;
wol->supported = 0;
wol->wolopts = 0;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
if (err == -EIO)
return;
if (wol_cfg & (1UL << adapter->portnum))
wol->supported |= WAKE_MAGIC;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
if (wol_cfg & (1UL << adapter->portnum))
wol->wolopts |= WAKE_MAGIC;
}
@ -1277,17 +1313,22 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
int err = 0;
if (qlcnic_83xx_check(adapter))
return -EOPNOTSUPP;
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
if (err == -EIO)
return err;
if (!(wol_cfg & (1 << adapter->portnum)))
return -EOPNOTSUPP;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
if (err == -EIO)
return err;
if (wol->wolopts & WAKE_MAGIC)
wol_cfg |= 1UL << adapter->portnum;
else


@ -317,16 +317,20 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
int done = 0, timeout = 0;
int timeout = 0;
int err = 0;
u32 done = 0;
while (!done) {
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
&err);
if (done == 1)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
dev_err(&adapter->pdev->dev,
"Failed to acquire sem=%d lock; holdby=%d\n",
sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
sem,
id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
return -EIO;
}
msleep(1);
@ -341,19 +345,22 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
int err = 0;
QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
}
int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
{
int err = 0;
u32 data;
if (qlcnic_82xx_check(adapter))
qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
else {
data = qlcnic_83xx_rd_reg_indirect(adapter, addr);
if (data == -EIO)
return -EIO;
data = QLCRD32(adapter, addr, &err);
if (err == -EIO)
return err;
}
return data;
}
@ -1159,7 +1166,8 @@ int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
return -EIO;
}
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
int *err)
{
unsigned long flags;
int rv;
@ -1415,7 +1423,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
{
int offset, board_type, magic;
int offset, board_type, magic, err = 0;
struct pci_dev *pdev = adapter->pdev;
offset = QLCNIC_FW_MAGIC_OFFSET;
@ -1435,7 +1443,9 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
adapter->ahw->board_type = board_type;
if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
if (err == -EIO)
return err;
if ((gpio & 0x8000) == 0)
board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
}
@ -1475,10 +1485,13 @@ int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
u32 wol_cfg;
int err = 0;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
if (wol_cfg & (1UL << adapter->portnum)) {
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
if (err == -EIO)
return err;
if (wol_cfg & (1 << adapter->portnum))
return 1;
}
@ -1539,6 +1552,7 @@ void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
loff_t offset, size_t size)
{
int err = 0;
u32 data;
u64 qmdata;
@ -1546,7 +1560,7 @@ void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
memcpy(buf, &qmdata, size);
} else {
data = QLCRD32(adapter, offset);
data = QLCRD32(adapter, offset, &err);
memcpy(buf, &data, size);
}
}


@ -154,7 +154,7 @@ struct qlcnic_hardware_context;
struct qlcnic_adapter;
int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong);
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);


@ -286,10 +286,11 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
{
long timeout = 0;
long done = 0;
int err = 0;
cond_resched();
while (done == 0) {
done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
done &= 2;
if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
dev_err(&adapter->pdev->dev,
@ -304,6 +305,8 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
static int do_rom_fast_read(struct qlcnic_adapter *adapter,
u32 addr, u32 *valp)
{
int err = 0;
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
@ -317,7 +320,9 @@ static int do_rom_fast_read(struct qlcnic_adapter *adapter,
udelay(10);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
*valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
*valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
if (err == -EIO)
return err;
return 0;
}
@ -369,11 +374,11 @@ int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
{
int addr, val;
int addr, err = 0;
int i, n, init_delay;
struct crb_addr_pair *buf;
unsigned offset;
u32 off;
u32 off, val;
struct pci_dev *pdev = adapter->pdev;
QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
@ -402,7 +407,9 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000);
val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
if (err == -EIO)
return err;
QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
/* halt epg */
@ -719,10 +726,12 @@ qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
u32 capability;
capability = 0;
u32 capability = 0;
int err = 0;
capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
if (err == -EIO)
return err;
if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
return 1;


@ -161,36 +161,68 @@ static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}
static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
struct qlcnic_filter *fil,
void *addr, u16 vlan_id)
{
int ret;
u8 op;
op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
if (ret)
return;
op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
if (!ret) {
hlist_del(&fil->fnode);
adapter->rx_fhash.fnum--;
}
}
static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
void *addr, u16 vlan_id)
{
struct qlcnic_filter *tmp_fil = NULL;
struct hlist_node *n;
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id)
return tmp_fil;
}
return NULL;
}
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
int loopback_pkt, u16 vlan_id)
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
struct hlist_node *n;
struct hlist_head *head;
unsigned long time;
u64 src_addr = 0;
u8 hindex, found = 0, op;
u8 hindex, op;
int ret;
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
hindex = qlcnic_mac_hash(src_addr) &
(adapter->fhash.fbucket_size - 1);
if (loopback_pkt) {
if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
return;
hindex = qlcnic_mac_hash(src_addr) &
(adapter->fhash.fbucket_size - 1);
head = &(adapter->rx_fhash.fhead[hindex]);
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
time = tmp_fil->ftime;
if (jiffies > (QLCNIC_READD_AGE * HZ + time))
tmp_fil->ftime = jiffies;
return;
}
tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
if (tmp_fil) {
time = tmp_fil->ftime;
if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
tmp_fil->ftime = jiffies;
return;
}
fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
@ -205,36 +237,37 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
adapter->rx_fhash.fnum++;
spin_unlock(&adapter->rx_mac_learn_lock);
} else {
hindex = qlcnic_mac_hash(src_addr) &
(adapter->fhash.fbucket_size - 1);
head = &(adapter->rx_fhash.fhead[hindex]);
spin_lock(&adapter->rx_mac_learn_lock);
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
found = 1;
break;
}
}
head = &adapter->fhash.fhead[hindex];
if (!found) {
spin_unlock(&adapter->rx_mac_learn_lock);
return;
}
spin_lock(&adapter->mac_learn_lock);
op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
vlan_id, op);
if (!ret) {
tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
if (tmp_fil) {
op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
ret = qlcnic_sre_macaddr_change(adapter,
(u8 *)&src_addr,
vlan_id, op);
if (!ret) {
hlist_del(&(tmp_fil->fnode));
adapter->rx_fhash.fnum--;
hlist_del(&tmp_fil->fnode);
adapter->fhash.fnum--;
}
spin_unlock(&adapter->mac_learn_lock);
return;
}
spin_unlock(&adapter->mac_learn_lock);
head = &adapter->rx_fhash.fhead[hindex];
spin_lock(&adapter->rx_mac_learn_lock);
tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
if (tmp_fil)
qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
vlan_id);
spin_unlock(&adapter->rx_mac_learn_lock);
}
}
@ -262,7 +295,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
vlan_req->vlan_id = cpu_to_le16(vlan_id);


@ -977,8 +977,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
int err;
struct qlcnic_info nic_info;
int err = 0;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
@ -993,7 +993,9 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
u32 temp;
temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
if (err == -EIO)
return err;
adapter->ahw->extra_capability[0] = temp;
}
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
@ -2141,7 +2143,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
!!qlcnic_use_msi)
dev_warn(&pdev->dev,
"83xx adapter do not support MSI interrupts\n");
"Device does not support MSI interrupts\n");
err = qlcnic_setup_intr(adapter, 0);
if (err) {
@ -3095,6 +3097,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
{
u32 state = 0, heartbeat;
u32 peg_status;
int err = 0;
if (qlcnic_check_temp(adapter))
goto detach;
@ -3141,11 +3144,11 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
"PEG_NET_4_PC: 0x%x\n",
peg_status,
QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "


@ -562,7 +562,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
INIT_LIST_HEAD(&adapter->vf_mc_list);
if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
dev_warn(&adapter->pdev->dev,
"83xx adapter do not support MSI interrupts\n");
"Device does not support MSI interrupts\n");
err = qlcnic_setup_intr(adapter, 1);
if (err) {


@ -478,7 +478,7 @@ rx_status_loop:
while (1) {
u32 status, len;
dma_addr_t mapping;
dma_addr_t mapping, new_mapping;
struct sk_buff *skb, *new_skb;
struct cp_desc *desc;
const unsigned buflen = cp->rx_buf_sz;
@ -520,6 +520,13 @@ rx_status_loop:
goto rx_next;
}
new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
dev->stats.rx_dropped++;
goto rx_next;
}
dma_unmap_single(&cp->pdev->dev, mapping,
buflen, PCI_DMA_FROMDEVICE);
@ -531,12 +538,11 @@ rx_status_loop:
skb_put(skb, len);
mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
PCI_DMA_FROMDEVICE);
cp->rx_skb[rx_tail] = new_skb;
cp_rx_skb(cp, skb, desc);
rx++;
mapping = new_mapping;
rx_next:
cp->rx_ring[rx_tail].opts2 = 0;
@ -716,6 +722,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
int first, int entry_last)
{
int frag, index;
struct cp_desc *txd;
skb_frag_t *this_frag;
for (frag = 0; frag+first < entry_last; frag++) {
index = first+frag;
cp->tx_skb[index] = NULL;
txd = &cp->tx_ring[index];
this_frag = &skb_shinfo(skb)->frags[frag];
dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
skb_frag_size(this_frag), PCI_DMA_TODEVICE);
}
}
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
struct net_device *dev)
{
@ -749,6 +771,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&cp->pdev->dev, mapping))
goto out_dma_error;
txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();
@ -786,6 +811,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
first_len = skb_headlen(skb);
first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
first_len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&cp->pdev->dev, first_mapping))
goto out_dma_error;
cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
@ -799,6 +827,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&cp->pdev->dev, mapping)) {
unwind_tx_frag_mapping(cp, skb, first_entry, entry);
goto out_dma_error;
}
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
ctrl = eor | len | DescOwn;
@ -859,11 +892,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
out_unlock:
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
return NETDEV_TX_OK;
out_dma_error:
kfree_skb(skb);
cp->dev->stats.tx_dropped++;
goto out_unlock;
}
/* Set or clear the multicast filter for this adaptor.
@ -1054,6 +1092,10 @@ static int cp_refill_rx(struct cp_private *cp)
mapping = dma_map_single(&cp->pdev->dev, skb->data,
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&cp->pdev->dev, mapping)) {
kfree_skb(skb);
goto err_out;
}
cp->rx_skb[i] = skb;
cp->rx_ring[i].opts2 = 0;
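Every dma_map_single() call in this driver is now paired with a dma_mapping_error() check before the address is written into a descriptor. A minimal sketch of that pattern, reusing the driver's field names but with an illustrative helper name:
/* Sketch only: map one RX buffer and refuse to arm the descriptor on a
 * mapping failure; the caller drops the packet and bumps rx_dropped.
 */
static int cp_map_rx_buf(struct cp_private *cp, struct sk_buff *skb,
			 unsigned int buflen, dma_addr_t *mapping)
{
	*mapping = dma_map_single(&cp->pdev->dev, skb->data, buflen,
				  PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&cp->pdev->dev, *mapping))
		return -ENOMEM;	/* bogus DMA address: do not touch the ring */
	return 0;
}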

View File

@ -3689,7 +3689,7 @@ static void rtl_phy_work(struct rtl8169_private *tp)
if (tp->link_ok(ioaddr))
return;
netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
tp->phy_reset_enable(tp);

View File

@ -1318,7 +1318,7 @@ static void sis900_timer(unsigned long data)
if (duplex){
sis900_set_mode(sis_priv, speed, duplex);
sis630_set_eq(net_dev, sis_priv->chipset_rev);
netif_start_queue(net_dev);
netif_carrier_on(net_dev);
}
sis_priv->timer.expires = jiffies + HZ;
@ -1336,10 +1336,8 @@ static void sis900_timer(unsigned long data)
status = sis900_default_phy(net_dev);
mii_phy = sis_priv->mii;
if (status & MII_STAT_LINK){
if (status & MII_STAT_LINK)
sis900_check_mode(net_dev, mii_phy);
netif_carrier_on(net_dev);
}
} else {
/* Link ON -> OFF */
if (!(status & MII_STAT_LINK)){
@ -1612,12 +1610,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
unsigned int index_cur_tx, index_dirty_tx;
unsigned int count_dirty_tx;
/* Don't transmit data before the complete of auto-negotiation */
if(!sis_priv->autong_complete){
netif_stop_queue(net_dev);
return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&sis_priv->lock, flags);
/* Calculate the next Tx descriptor entry. */

View File

@ -1867,7 +1867,7 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
if (request_irq(i, cpsw_interrupt, 0,
dev_name(&pdev->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;

View File

@ -1568,8 +1568,7 @@ static int emac_dev_open(struct net_device *ndev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
IRQF_DISABLED,
ndev->name, ndev))
0, ndev->name, ndev))
goto rollback;
}
k++;

View File

@ -337,8 +337,11 @@ static int macvlan_open(struct net_device *dev)
int err;
if (vlan->port->passthru) {
if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
dev_set_promiscuity(lowerdev, 1);
if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
err = dev_set_promiscuity(lowerdev, 1);
if (err < 0)
goto out;
}
goto hash_add;
}
@ -863,6 +866,18 @@ static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
enum macvlan_mode mode;
bool set_mode = false;
/* Validate mode, but don't set yet: setting flags may fail. */
if (data && data[IFLA_MACVLAN_MODE]) {
set_mode = true;
mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
/* Passthrough mode can't be set or cleared dynamically */
if ((mode == MACVLAN_MODE_PASSTHRU) !=
(vlan->mode == MACVLAN_MODE_PASSTHRU))
return -EINVAL;
}
if (data && data[IFLA_MACVLAN_FLAGS]) {
__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
@ -879,8 +894,8 @@ static int macvlan_changelink(struct net_device *dev,
}
vlan->flags = flags;
}
if (data && data[IFLA_MACVLAN_MODE])
vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
if (set_mode)
vlan->mode = mode;
return 0;
}

View File

@ -344,17 +344,41 @@ static const int multicast_filter_limit = 32;
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
int ret;
void *tmp;
tmp = kmalloc(size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, data, size, 500);
value, index, tmp, size, 500);
memcpy(data, tmp, size);
kfree(tmp);
return ret;
}
static
int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
int ret;
void *tmp;
tmp = kmalloc(size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
memcpy(tmp, data, size);
ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
value, index, data, size, 500);
value, index, tmp, size, 500);
kfree(tmp);
return ret;
}
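Both helpers above share one idea: the buffer handed to usb_control_msg() must be DMA-capable (kmalloc'd) memory, never a caller's stack variable, so the data is staged through a heap bounce buffer. A condensed sketch of the read side, with an illustrative wrapper name and placeholder request values:
/* Illustrative bounce-buffer wrapper; bRequest/requesttype are placeholders. */
static int usb_read_regs_bounced(struct usb_device *udev, u16 value, u16 index,
				 void *data, u16 size)
{
	void *tmp = kmalloc(size, GFP_KERNEL);
	int ret;
	if (!tmp)
		return -ENOMEM;
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x05 /* placeholder bRequest */,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, tmp, size, 500);
	if (ret > 0)
		memcpy(data, tmp, size);	/* copy back only what was read */
	kfree(tmp);
	return ret;
}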
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@ -490,37 +514,31 @@ int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
__le32 data;
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(data), &data);
else
usb_ocp_read(tp, index, sizeof(data), &data);
generic_ocp_read(tp, index, sizeof(data), &data, type);
return __le32_to_cpu(data);
}
static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data)
{
if (type == MCU_TYPE_PLA)
pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
else
usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
__le32 tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type);
}
static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
__le32 tmp;
u8 shift = index & 2;
index &= ~3;
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(data), &data);
else
usb_ocp_read(tp, index, sizeof(data), &data);
generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
data = __le32_to_cpu(data);
data = __le32_to_cpu(tmp);
data >>= (shift * 8);
data &= 0xffff;
@ -529,7 +547,8 @@ static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
{
u32 tmp, mask = 0xffff;
u32 mask = 0xffff;
__le32 tmp;
u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
@ -542,34 +561,25 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(tmp), &tmp);
else
usb_ocp_read(tp, index, sizeof(tmp), &tmp);
generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
tmp = __le32_to_cpu(tmp) & ~mask;
tmp |= data;
tmp = __cpu_to_le32(tmp);
data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
if (type == MCU_TYPE_PLA)
pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
else
usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
__le32 tmp;
u8 shift = index & 3;
index &= ~3;
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(data), &data);
else
usb_ocp_read(tp, index, sizeof(data), &data);
generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
data = __le32_to_cpu(data);
data = __le32_to_cpu(tmp);
data >>= (shift * 8);
data &= 0xff;
@ -578,7 +588,8 @@ static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
{
u32 tmp, mask = 0xff;
u32 mask = 0xff;
__le32 tmp;
u16 byen = BYTE_EN_BYTE;
u8 shift = index & 3;
@ -591,19 +602,12 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(tmp), &tmp);
else
usb_ocp_read(tp, index, sizeof(tmp), &tmp);
generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
tmp = __le32_to_cpu(tmp) & ~mask;
tmp |= data;
tmp = __cpu_to_le32(tmp);
data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
if (type == MCU_TYPE_PLA)
pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
else
usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
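The same rewrites also make the endianness explicit: the word exchanged with the device stays typed as __le32 and is converted only at the CPU boundary. The read-modify-write idiom in isolation (read_reg()/write_reg() are hypothetical stand-ins for generic_ocp_read()/generic_ocp_write()):
/* Update a 16-bit field inside a little-endian 32-bit register word. */
static void read_reg(u16 index, __le32 *val);	/* hypothetical I/O helpers */
static void write_reg(u16 index, __le32 *val);
static void update_le_word(u16 index, u32 val)
{
	__le32 raw;
	u32 host, mask = 0xffff;
	u8 shift = index & 2;
	if (shift) {
		val <<= 16;
		mask <<= 16;
	}
	index &= ~3;
	read_reg(index, &raw);			/* fetch the full aligned dword */
	host = __le32_to_cpu(raw) & ~mask;	/* to CPU order, clear the field */
	raw = __cpu_to_le32(host | val);	/* merge and convert back */
	write_reg(index, &raw);
}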
static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
@ -685,21 +689,14 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
u8 *node_id;
u8 node_id[8] = {0};
node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL);
if (!node_id) {
netif_err(tp, probe, dev, "out of memory");
return;
}
if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0)
if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
netif_notice(tp, probe, dev, "inet addr fail\n");
else {
memcpy(dev->dev_addr, node_id, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}
kfree(node_id);
}
static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
@ -882,15 +879,10 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
static void _rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
u32 tmp, *mc_filter; /* Multicast hash filter */
u32 mc_filter[2]; /* Multicast hash filter */
__le32 tmp[2];
u32 ocp_data;
mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL);
if (!mc_filter) {
netif_err(tp, link, netdev, "out of memory");
return;
}
clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_stop_queue(netdev);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@ -918,14 +910,12 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
}
}
tmp = mc_filter[0];
mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1]));
mc_filter[1] = __cpu_to_le32(swab32(tmp));
tmp[0] = __cpu_to_le32(swab32(mc_filter[1]));
tmp[1] = __cpu_to_le32(swab32(mc_filter[0]));
pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter);
pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
netif_wake_queue(netdev);
kfree(mc_filter);
}
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,

View File

@ -24,34 +24,43 @@
static int pla_read_word(struct usb_device *udev, u16 index)
{
int data, ret;
int ret;
u8 shift = index & 2;
__le32 ocp_data;
__le32 *tmp;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
index &= ~3;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
500);
index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
if (ret < 0)
return ret;
goto out2;
data = __le32_to_cpu(ocp_data);
data >>= (shift * 8);
data &= 0xffff;
ret = __le32_to_cpu(*tmp);
ret >>= (shift * 8);
ret &= 0xffff;
return data;
out2:
kfree(tmp);
return ret;
}
static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
{
__le32 ocp_data;
__le32 *tmp;
u32 mask = 0xffff;
u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
int ret;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
data &= mask;
if (shift) {
@ -63,19 +72,20 @@ static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
500);
index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
if (ret < 0)
return ret;
goto out3;
data |= __le32_to_cpu(ocp_data) & ~mask;
ocp_data = __cpu_to_le32(data);
data |= __le32_to_cpu(*tmp) & ~mask;
*tmp = __cpu_to_le32(data);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
index, MCU_TYPE_PLA | byen, &ocp_data,
sizeof(ocp_data), 500);
index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
500);
out3:
kfree(tmp);
return ret;
}
@ -116,11 +126,18 @@ out1:
static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
{
struct usbnet *dev = netdev_priv(netdev);
int ret;
if (phy_id != R815x_PHY_ID)
return -EINVAL;
return ocp_reg_read(dev, BASE_MII + reg * 2);
if (usb_autopm_get_interface(dev->intf) < 0)
return -ENODEV;
ret = ocp_reg_read(dev, BASE_MII + reg * 2);
usb_autopm_put_interface(dev->intf);
return ret;
}
static
@ -131,7 +148,12 @@ void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
if (phy_id != R815x_PHY_ID)
return;
if (usb_autopm_get_interface(dev->intf) < 0)
return;
ocp_reg_write(dev, BASE_MII + reg * 2, val);
usb_autopm_put_interface(dev->intf);
}
static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
@ -150,7 +172,7 @@ static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = R815x_PHY_ID;
dev->mii.supports_gmii = 1;
return 0;
return status;
}
static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
@ -169,7 +191,7 @@ static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = R815x_PHY_ID;
dev->mii.supports_gmii = 0;
return 0;
return status;
}
static const struct driver_info r8152_info = {

View File

@ -1,6 +1,6 @@
config ATH10K
tristate "Atheros 802.11ac wireless cards support"
depends on MAC80211
depends on MAC80211 && HAS_DMA
select ATH_COMMON
---help---
This module adds support for wireless adapters based on

View File

@ -1093,8 +1093,11 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
err = brcmf_fil_cmd_data_set(vif->ifp,
BRCMF_C_DISASSOC, NULL, 0);
if (err)
if (err) {
brcmf_err("WLC_DISASSOC failed (%d)\n", err);
cfg80211_disconnected(vif->wdev.netdev, 0,
NULL, 0, GFP_KERNEL);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);

View File

@ -97,6 +97,8 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
#define APMG_RTC_INT_STT_RFKILL (0x10000000)
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C

View File

@ -134,7 +134,7 @@ struct wowlan_key_data {
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwl_wowlan_tkip_params_cmd *tkip;
bool error, use_rsc_tsc, use_tkip;
int gtk_key_idx;
int wep_key_idx;
};
static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
@ -188,8 +188,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
wkc.wep_key.key_offset = 0;
} else {
/* others start at 1 */
data->gtk_key_idx++;
wkc.wep_key.key_offset = data->gtk_key_idx;
data->wep_key_idx++;
wkc.wep_key.key_offset = data->wep_key_idx;
}
ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
@ -316,8 +316,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
mvm->ptk_ivlen = key->iv_len;
mvm->ptk_icvlen = key->icv_len;
} else {
data->gtk_key_idx++;
key->hw_key_idx = data->gtk_key_idx;
/*
* firmware only supports TSC/RSC for a single key,
* so if there are multiple keep overwriting them
* with new ones -- this relies on mac80211 doing
* list_add_tail().
*/
key->hw_key_idx = 1;
mvm->gtk_ivlen = key->iv_len;
mvm->gtk_icvlen = key->icv_len;
}

View File

@ -69,7 +69,6 @@
/* Scan Commands, Responses, Notifications */
/* Masks for iwl_scan_channel.type flags */
#define SCAN_CHANNEL_TYPE_PASSIVE 0
#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
#define SCAN_CHANNEL_NARROW_BAND BIT(22)

View File

@ -511,6 +511,27 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_unlock;
/*
* TODO: remove this temporary code.
* Currently MVM FW supports power management only on single MAC.
* If new interface added, disable PM on existing interface.
* P2P device is a special case, since it is handled by FW similarly to
* scan. If a P2P device is added, PM remains enabled on the existing

* interface.
* Note: the method below does not count the new interface being added
* at this moment.
*/
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count++;
if (mvm->vif_count > 1) {
IWL_DEBUG_MAC80211(mvm,
"Disable power on existing interfaces\n");
ieee80211_iterate_active_interfaces_atomic(
mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_pm_disable_iterator, mvm);
}
/*
* The AP binding flow can be done only after the beacon
* template is configured (which happens only in the mac80211
@ -534,27 +555,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
}
/*
* TODO: remove this temporary code.
* Currently MVM FW supports power management only on single MAC.
* If new interface added, disable PM on existing interface.
* P2P device is a special case, since it is handled by FW similarly to
* scan. If a P2P device is added, PM remains enabled on the existing
* interface.
* Note: the method below does not count the new interface being added
* at this moment.
*/
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count++;
if (mvm->vif_count > 1) {
IWL_DEBUG_MAC80211(mvm,
"Disable power on existing interfaces\n");
ieee80211_iterate_active_interfaces_atomic(
mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_pm_disable_iterator, mvm);
}
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
goto out_release;

View File

@ -178,19 +178,12 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
__le32 chan_type_value;
if (req->n_ssids > 0)
chan_type_value = cpu_to_le32(BIT(req->n_ssids) - 1);
else
chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
else
chan->type = chan_type_value;
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
chan->passive_dwell = cpu_to_le16(passive_dwell);
chan->iteration_count = cpu_to_le16(1);

View File

@ -915,6 +915,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
u16 txq_id;
enum iwl_mvm_agg_state old_state;
/*
* First set the agg state to OFF to avoid calling
@ -924,13 +925,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
txq_id = tid_data->txq_id;
IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
mvmsta->sta_id, tid, txq_id, tid_data->state);
old_state = tid_data->state;
tid_data->state = IWL_AGG_OFF;
spin_unlock_bh(&mvmsta->lock);
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
if (old_state >= IWL_AGG_ON) {
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
}
iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
mvm->queue_to_mac80211[tid_data->txq_id] =
IWL_INVALID_MAC80211_QUEUE;

View File

@ -130,6 +130,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */

View File

@ -888,6 +888,14 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill) {
/*
* Clear the interrupt in APMG if the NIC is going down.
* Note that when the NIC exits RFkill (else branch), we
* can't access prph and the NIC will be reset in
* start_hw anyway.
*/
iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
APMG_RTC_INT_STT_RFKILL);
set_bit(STATUS_RFKILL, &trans_pcie->status);
if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
&trans_pcie->status))

View File

@ -670,6 +670,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return err;
}
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(10, 15);
iwl_pcie_apm_init(trans);
/* From now on, the op_mode will be kept updated about RF kill state */

View File

@ -1716,9 +1716,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret;
if (priv->bss_mode != NL80211_IFTYPE_STATION) {
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
wiphy_err(wiphy,
"%s: reject infra assoc request in non-STA mode\n",
"%s: reject infra assoc request in non-STA role\n",
dev->name);
return -EINVAL;
}

View File

@ -415,7 +415,8 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
u32 k = 0;
struct mwifiex_adapter *adapter = priv->adapter;
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
switch (adapter->config_bands) {
case BAND_B:
dev_dbg(adapter->dev, "info: infra band=%d "

View File

@ -1291,8 +1291,10 @@ int mwifiex_associate(struct mwifiex_private *priv,
{
u8 current_bssid[ETH_ALEN];
/* Return error if the adapter or table entry is not marked as infra */
if ((priv->bss_mode != NL80211_IFTYPE_STATION) ||
/* Return error if the adapter is not STA role or table entry
* is not marked as infra.
*/
if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
(bss_desc->bss_mode != NL80211_IFTYPE_STATION))
return -1;

View File

@ -1639,8 +1639,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
/* Allocate buffer and copy payload */
blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
buf_block_len = (pkt_len + blk_size - 1) / blk_size;
*(u16 *) &payload[0] = (u16) pkt_len;
*(u16 *) &payload[2] = type;
*(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
*(__le16 *)&payload[2] = cpu_to_le16(type);
/*
* This is SDIO specific header

View File

@ -257,10 +257,10 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
goto done;
}
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
u8 config_bands;
/* Infra mode */
ret = mwifiex_deauthenticate(priv, NULL);
if (ret)
goto done;

View File

@ -936,13 +936,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_pause_queue(struct data_queue *queue)
void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
!test_bit(QUEUE_STARTED, &queue->flags) ||
test_and_set_bit(QUEUE_PAUSED, &queue->flags))
return;
switch (queue->qid) {
case QID_AC_VO:
case QID_AC_VI:
@ -958,6 +953,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
break;
}
}
void rt2x00queue_pause_queue(struct data_queue *queue)
{
if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
!test_bit(QUEUE_STARTED, &queue->flags) ||
test_and_set_bit(QUEUE_PAUSED, &queue->flags))
return;
rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
@ -1019,7 +1023,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
return;
}
rt2x00queue_pause_queue(queue);
rt2x00queue_pause_queue_nocheck(queue);
queue->rt2x00dev->ops->lib->stop_queue(queue);

View File

@ -973,7 +973,7 @@ struct net_device_ops {
gfp_t gfp);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
int (*ndo_busy_poll)(struct napi_struct *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,

View File

@ -501,7 +501,7 @@ struct sk_buff {
/* 7/9 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);
#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL
#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
union {
unsigned int napi_id;
dma_cookie_t dma_cookie;

View File

@ -27,7 +27,7 @@
#include <linux/netdevice.h>
#include <net/ip.h>
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
@ -146,7 +146,7 @@ static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
sk->sk_napi_id = skb->napi_id;
}
#else /* CONFIG_NET_LL_RX_POLL */
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
return 0;
@ -181,5 +181,10 @@ static inline bool busy_loop_timeout(unsigned long end_time)
return true;
}
#endif /* CONFIG_NET_LL_RX_POLL */
static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
#endif /* _LINUX_NET_BUSY_POLL_H */
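The added sk_busy_loop() stub is what keeps call sites building when CONFIG_NET_RX_BUSY_POLL is off. The compile-out pattern in miniature, using made-up names (CONFIG_FEATURE_FOO, foo_poll()):
/* When the feature is compiled out, supply static inline no-op stubs so
 * callers need no #ifdefs of their own.
 */
#ifdef CONFIG_FEATURE_FOO
bool foo_poll(struct sock *sk, int nonblock);	/* real version built elsewhere */
#else
static inline bool foo_poll(struct sock *sk, int nonblock)
{
	return false;	/* feature disabled: nothing was polled */
}
#endif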

View File

@ -300,7 +300,7 @@ extern void inet6_rt_notify(int event, struct rt6_info *rt,
struct nl_info *info);
extern void fib6_run_gc(unsigned long expires,
struct net *net);
struct net *net, bool force);
extern void fib6_gc_cleanup(void);

View File

@ -119,7 +119,7 @@ extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
* if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
* also need a pad of 2.
*/
static int ndisc_addr_option_pad(unsigned short type)
static inline int ndisc_addr_option_pad(unsigned short type)
{
switch (type) {
case ARPHRD_INFINIBAND: return 2;

View File

@ -59,7 +59,7 @@ struct nfc_hci_ops {
struct nfc_target *target);
int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
struct sk_buff *skb);
int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name);
int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
int (*discover_se)(struct nfc_hci_dev *dev);
int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);

View File

@ -68,7 +68,7 @@ struct nfc_ops {
void *cb_context);
int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name);
int (*fw_download)(struct nfc_dev *dev, const char *firmware_name);
/* Secure Element API */
int (*discover_se)(struct nfc_dev *dev);
@ -127,7 +127,7 @@ struct nfc_dev {
int targets_generation;
struct device dev;
bool dev_up;
bool fw_upload_in_progress;
bool fw_download_in_progress;
u8 rf_mode;
bool polling;
struct nfc_target *active_target;

View File

@ -327,7 +327,7 @@ struct sock {
#ifdef CONFIG_RPS
__u32 sk_rxhash;
#endif
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_napi_id;
unsigned int sk_ll_usec;
#endif

View File

@ -69,8 +69,8 @@
* starting a poll from a device which has a secure element enabled means
* we want to do SE based card emulation.
* @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element.
* @NFC_CMD_FW_UPLOAD: Request to Load/flash firmware, or event to inform that
* some firmware was loaded
* @NFC_CMD_FW_DOWNLOAD: Request to Load/flash firmware, or event to inform
* that some firmware was loaded
*/
enum nfc_commands {
NFC_CMD_UNSPEC,
@ -94,7 +94,7 @@ enum nfc_commands {
NFC_CMD_DISABLE_SE,
NFC_CMD_LLC_SDREQ,
NFC_EVENT_LLC_SDRES,
NFC_CMD_FW_UPLOAD,
NFC_CMD_FW_DOWNLOAD,
NFC_EVENT_SE_ADDED,
NFC_EVENT_SE_REMOVED,
/* private: internal use only */

View File

@ -244,7 +244,7 @@ config NETPRIO_CGROUP
Cgroup subsystem for use in assigning processes to network priorities on
a per-interface basis
config NET_LL_RX_POLL
config NET_RX_BUSY_POLL
boolean
default y

View File

@ -513,7 +513,10 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
hci_setup_event_mask(req);
if (hdev->hci_ver > BLUETOOTH_VER_1_1)
/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
* local supported commands HCI command.
*/
if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (lmp_ssp_capable(hdev)) {
@ -2165,10 +2168,6 @@ int hci_register_dev(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
write_lock(&hci_dev_list_lock);
list_add(&hdev->list, &hci_dev_list);
write_unlock(&hci_dev_list_lock);
hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, hdev->name);
if (!hdev->workqueue) {
@ -2203,6 +2202,10 @@ int hci_register_dev(struct hci_dev *hdev)
if (hdev->dev_type != HCI_AMP)
set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
write_lock(&hci_dev_list_lock);
list_add(&hdev->list, &hci_dev_list);
write_unlock(&hci_dev_list_lock);
hci_notify(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
@ -2215,9 +2218,6 @@ err_wqueue:
destroy_workqueue(hdev->req_workqueue);
err:
ida_simple_remove(&hci_index_ida, hdev->id);
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
return error;
}
@ -3399,8 +3399,16 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
*/
if (hdev->sent_cmd) {
req_complete = bt_cb(hdev->sent_cmd)->req.complete;
if (req_complete)
if (req_complete) {
/* We must set the complete callback to NULL to
* avoid calling the callback more than once if
* this function gets called again.
*/
bt_cb(hdev->sent_cmd)->req.complete = NULL;
goto call_complete;
}
}
/* Remove all pending commands belonging to this request */

View File

@ -70,7 +70,8 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
}
mdst = br_mdb_get(br, skb, vid);
if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(br))
br_multicast_deliver(mdst, skb);
else
br_flood_deliver(br, skb, false);

View File

@ -101,7 +101,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
unicast = false;
} else if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb, vid);
if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(br)) {
if ((mdst && mdst->mglist) ||
br_multicast_is_router(br))
skb2 = skb;

View File

@ -1014,6 +1014,16 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
}
#endif
static void br_multicast_update_querier_timer(struct net_bridge *br,
unsigned long max_delay)
{
if (!timer_pending(&br->multicast_querier_timer))
br->multicast_querier_delay_time = jiffies + max_delay;
mod_timer(&br->multicast_querier_timer,
jiffies + br->multicast_querier_interval);
}
/*
* Add port to router_list
* list is maintained ordered by pointer value
@ -1064,11 +1074,11 @@ timer:
static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
int saddr)
int saddr,
unsigned long max_delay)
{
if (saddr)
mod_timer(&br->multicast_querier_timer,
jiffies + br->multicast_querier_interval);
br_multicast_update_querier_timer(br, max_delay);
else if (timer_pending(&br->multicast_querier_timer))
return;
@ -1096,8 +1106,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
br_multicast_query_received(br, port, !!iph->saddr);
group = ih->group;
if (skb->len == sizeof(*ih)) {
@ -1121,6 +1129,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
}
br_multicast_query_received(br, port, !!iph->saddr, max_delay);
if (!group)
goto out;
@ -1176,8 +1186,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
@ -1198,6 +1206,9 @@ static int br_ip6_multicast_query(struct net_bridge *br,
max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
}
br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
max_delay);
if (!group)
goto out;
@ -1643,6 +1654,8 @@ void br_multicast_init(struct net_bridge *br)
br->multicast_querier_interval = 255 * HZ;
br->multicast_membership_interval = 260 * HZ;
br->multicast_querier_delay_time = 0;
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
br_multicast_local_router_expired, 0);
@ -1831,6 +1844,8 @@ unlock:
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
unsigned long max_delay;
val = !!val;
spin_lock_bh(&br->multicast_lock);
@ -1838,8 +1853,14 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
goto unlock;
br->multicast_querier = val;
if (val)
br_multicast_start_querier(br);
if (!val)
goto unlock;
max_delay = br->multicast_query_response_interval;
if (!timer_pending(&br->multicast_querier_timer))
br->multicast_querier_delay_time = jiffies + max_delay;
br_multicast_start_querier(br);
unlock:
spin_unlock_bh(&br->multicast_lock);

View File

@ -267,6 +267,7 @@ struct net_bridge
unsigned long multicast_query_interval;
unsigned long multicast_query_response_interval;
unsigned long multicast_startup_query_interval;
unsigned long multicast_querier_delay_time;
spinlock_t multicast_lock;
struct net_bridge_mdb_htable __rcu *mdb;
@ -501,6 +502,13 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
(br->multicast_router == 1 &&
timer_pending(&br->multicast_router_timer));
}
static inline bool br_multicast_querier_exists(struct net_bridge *br)
{
return time_is_before_jiffies(br->multicast_querier_delay_time) &&
(br->multicast_querier ||
timer_pending(&br->multicast_querier_timer));
}
#else
static inline int br_multicast_rcv(struct net_bridge *br,
struct net_bridge_port *port,
@ -557,6 +565,10 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
{
return 0;
}
static inline bool br_multicast_querier_exists(struct net_bridge *br)
{
return false;
}
static inline void br_mdb_init(void)
{
}

View File

@ -740,7 +740,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
skb_copy_secmark(new, old);
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
new->napi_id = old->napi_id;
#endif
}

View File

@ -900,7 +900,7 @@ set_rcvbuf:
sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
break;
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
@ -1170,7 +1170,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
break;
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
v.val = sk->sk_ll_usec;
break;
@ -2292,7 +2292,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_stamp = ktime_set(-1L, 0);
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0;
sk->sk_ll_usec = sysctl_net_busy_read;
#endif

View File

@ -21,7 +21,9 @@
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
static int zero = 0;
static int one = 1;
static int ushort_max = USHRT_MAX;
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
@ -298,7 +300,7 @@ static struct ctl_table net_core_table[] = {
.proc_handler = flow_limit_table_len_sysctl
},
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
{
.procname = "busy_poll",
.data = &sysctl_net_busy_poll,
@ -339,7 +341,9 @@ static struct ctl_table netns_core_table[] = {
.data = &init_net.core.sysctl_somaxconn,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
.extra1 = &zero,
.extra2 = &ushort_max,
.proc_handler = proc_dointvec_minmax
},
{ }
};
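The new zero/USHRT_MAX bounds on somaxconn matter because the listen backlog this value feeds is stored in a 16-bit field, so anything larger would wrap silently. A small userspace illustration of that truncation (not kernel code):
#include <limits.h>
#include <stdio.h>
int main(void)
{
	int somaxconn = 100000;			/* larger than USHRT_MAX (65535) */
	unsigned short backlog = somaxconn;	/* silently truncated */
	printf("%d becomes %hu\n", somaxconn, backlog);	/* 100000 becomes 34464 */
	return 0;
}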

View File

@ -772,7 +772,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
ci = nla_data(tb[IFA_CACHEINFO]);
if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
err = -EINVAL;
goto errout;
goto errout_free;
}
*pvalid_lft = ci->ifa_valid;
*pprefered_lft = ci->ifa_prefered;
@ -780,6 +780,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
return ifa;
errout_free:
inet_free_ifa(ifa);
errout:
return ERR_PTR(err);
}

View File

@ -813,8 +813,9 @@ static u32 inet6_addr_hash(const struct in6_addr *addr)
/* On success it returns ifp with increased reference count */
static struct inet6_ifaddr *
ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
int scope, u32 flags)
ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
const struct in6_addr *peer_addr, int pfxlen,
int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
{
struct inet6_ifaddr *ifa = NULL;
struct rt6_info *rt;
@ -863,6 +864,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
}
ifa->addr = *addr;
if (peer_addr)
ifa->peer_addr = *peer_addr;
spin_lock_init(&ifa->lock);
spin_lock_init(&ifa->state_lock);
@ -872,6 +875,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
ifa->scope = scope;
ifa->prefix_len = pfxlen;
ifa->flags = flags | IFA_F_TENTATIVE;
ifa->valid_lft = valid_lft;
ifa->prefered_lft = prefered_lft;
ifa->cstamp = ifa->tstamp = jiffies;
ifa->tokenized = false;
@ -1123,8 +1128,9 @@ retry:
ift = !max_addresses ||
ipv6_count_addresses(idev) < max_addresses ?
ipv6_add_addr(idev, &addr, tmp_plen, ipv6_addr_scope(&addr),
addr_flags) : NULL;
ipv6_add_addr(idev, &addr, NULL, tmp_plen,
ipv6_addr_scope(&addr), addr_flags,
tmp_valid_lft, tmp_prefered_lft) : NULL;
if (IS_ERR_OR_NULL(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
@ -1136,8 +1142,6 @@ retry:
spin_lock_bh(&ift->lock);
ift->ifpub = ifp;
ift->valid_lft = tmp_valid_lft;
ift->prefered_lft = tmp_prefered_lft;
ift->cstamp = now;
ift->tstamp = tmp_tstamp;
spin_unlock_bh(&ift->lock);
@ -2179,16 +2183,19 @@ ok:
*/
if (!max_addresses ||
ipv6_count_addresses(in6_dev) < max_addresses)
ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len,
ifp = ipv6_add_addr(in6_dev, &addr, NULL,
pinfo->prefix_len,
addr_type&IPV6_ADDR_SCOPE_MASK,
addr_flags);
addr_flags, valid_lft,
prefered_lft);
if (IS_ERR_OR_NULL(ifp)) {
in6_dev_put(in6_dev);
return;
}
update_lft = create = 1;
update_lft = 0;
create = 1;
ifp->cstamp = jiffies;
ifp->tokenized = tokenized;
addrconf_dad_start(ifp);
@ -2209,7 +2216,7 @@ ok:
stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
else
stored_lft = 0;
if (!update_lft && stored_lft) {
if (!update_lft && !create && stored_lft) {
if (valid_lft > MIN_VALID_LIFETIME ||
valid_lft > stored_lft)
update_lft = 1;
@ -2455,17 +2462,10 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
prefered_lft = timeout;
}
ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
valid_lft, prefered_lft);
if (!IS_ERR(ifp)) {
spin_lock_bh(&ifp->lock);
ifp->valid_lft = valid_lft;
ifp->prefered_lft = prefered_lft;
ifp->tstamp = jiffies;
if (peer_pfx)
ifp->peer_addr = *peer_pfx;
spin_unlock_bh(&ifp->lock);
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
expires, flags);
/*
@ -2557,7 +2557,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
{
struct inet6_ifaddr *ifp;
ifp = ipv6_add_addr(idev, addr, plen, scope, IFA_F_PERMANENT);
ifp = ipv6_add_addr(idev, addr, NULL, plen,
scope, IFA_F_PERMANENT, 0, 0);
if (!IS_ERR(ifp)) {
spin_lock_bh(&ifp->lock);
ifp->flags &= ~IFA_F_TENTATIVE;
@ -2683,7 +2684,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
#endif
ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
if (!IS_ERR(ifp)) {
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
addrconf_dad_start(ifp);

View File

@ -1632,27 +1632,28 @@ static int fib6_age(struct rt6_info *rt, void *arg)
static DEFINE_SPINLOCK(fib6_gc_lock);
void fib6_run_gc(unsigned long expires, struct net *net)
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
if (expires != ~0UL) {
unsigned long now;
if (force) {
spin_lock_bh(&fib6_gc_lock);
gc_args.timeout = expires ? (int)expires :
net->ipv6.sysctl.ip6_rt_gc_interval;
} else {
if (!spin_trylock_bh(&fib6_gc_lock)) {
mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
return;
}
gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
} else if (!spin_trylock_bh(&fib6_gc_lock)) {
mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
return;
}
gc_args.timeout = expires ? (int)expires :
net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = icmp6_dst_gc();
fib6_clean_all(net, fib6_age, 0, NULL);
now = jiffies;
net->ipv6.ip6_rt_last_gc = now;
if (gc_args.more)
mod_timer(&net->ipv6.ip6_fib_timer,
round_jiffies(jiffies
round_jiffies(now
+ net->ipv6.sysctl.ip6_rt_gc_interval));
else
del_timer(&net->ipv6.ip6_fib_timer);
@ -1661,7 +1662,7 @@ void fib6_run_gc(unsigned long expires, struct net *net)
static void fib6_gc_timer_cb(unsigned long arg)
{
fib6_run_gc(0, (struct net *)arg);
fib6_run_gc(0, (struct net *)arg, true);
}
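The force parameter encodes a simple locking policy: a mandatory GC run waits for the lock, while a best-effort run backs off and reschedules itself instead of spinning on the lock. The shape of that policy in isolation (run_gc() is a hypothetical stand-in for the actual aging pass):
static DEFINE_SPINLOCK(gc_lock);
static void run_gc(struct net *net);	/* hypothetical aging pass */
static void gc_run(struct net *net, bool force)
{
	if (force) {
		spin_lock_bh(&gc_lock);		/* must run now: wait for the lock */
	} else if (!spin_trylock_bh(&gc_lock)) {
		mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
		return;				/* busy: retry in a second */
	}
	run_gc(net);
	spin_unlock_bh(&gc_lock);
}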
static int __net_init fib6_net_init(struct net *net)

View File

@ -1576,7 +1576,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_CHANGEADDR:
neigh_changeaddr(&nd_tbl, dev);
fib6_run_gc(~0UL, net);
fib6_run_gc(0, net, false);
idev = in6_dev_get(dev);
if (!idev)
break;
@ -1586,7 +1586,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
break;
case NETDEV_DOWN:
neigh_ifdown(&nd_tbl, dev);
fib6_run_gc(~0UL, net);
fib6_run_gc(0, net, false);
break;
case NETDEV_NOTIFY_PEERS:
ndisc_send_unsol_na(dev);

View File

@ -1311,7 +1311,6 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
static int ip6_dst_gc(struct dst_ops *ops)
{
unsigned long now = jiffies;
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
@ -1321,13 +1320,12 @@ static int ip6_dst_gc(struct dst_ops *ops)
int entries;
entries = dst_entries_get_fast(ops);
if (time_after(rt_last_gc + rt_min_interval, now) &&
if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
entries <= rt_max_size)
goto out;
net->ipv6.ip6_rt_gc_expire++;
fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
net->ipv6.ip6_rt_last_gc = now;
fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
entries = dst_entries_get_slow(ops);
if (entries < ops->gc_thresh)
net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@ -2827,7 +2825,7 @@ int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
net = (struct net *)ctl->extra1;
delay = net->ipv6.sysctl.flush_delay;
proc_dointvec(ctl, write, buffer, lenp, ppos);
fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}

View File

@ -229,6 +229,10 @@ void ieee80211_mps_sta_status_update(struct sta_info *sta)
enum nl80211_mesh_power_mode pm;
bool do_buffer;
/* For non-assoc STA, prevent buffering or frame transmission */
if (sta->sta_state < IEEE80211_STA_ASSOC)
return;
/*
* use peer-specific power mode if peering is established and the
* peer's power mode is known

View File

@ -99,10 +99,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
}
mutex_unlock(&local->sta_mtx);
/* remove all interfaces */
/* remove all interfaces that were created in the driver */
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
if (!ieee80211_sdata_running(sdata) ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_MONITOR)
continue;
drv_remove_interface(local, sdata);
}

View File

@ -691,8 +691,8 @@ static int netlbl_cipsov4_remove_cb(struct netlbl_dom_map *entry, void *arg)
{
struct netlbl_domhsh_walk_arg *cb_arg = arg;
if (entry->type == NETLBL_NLTYPE_CIPSOV4 &&
entry->type_def.cipsov4->doi == cb_arg->doi)
if (entry->def.type == NETLBL_NLTYPE_CIPSOV4 &&
entry->def.cipso->doi == cb_arg->doi)
return netlbl_domhsh_remove_entry(entry, cb_arg->audit_info);
return 0;

View File

@ -84,15 +84,15 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
#endif /* IPv6 */
ptr = container_of(entry, struct netlbl_dom_map, rcu);
if (ptr->type == NETLBL_NLTYPE_ADDRSELECT) {
if (ptr->def.type == NETLBL_NLTYPE_ADDRSELECT) {
netlbl_af4list_foreach_safe(iter4, tmp4,
&ptr->type_def.addrsel->list4) {
&ptr->def.addrsel->list4) {
netlbl_af4list_remove_entry(iter4);
kfree(netlbl_domhsh_addr4_entry(iter4));
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
&ptr->type_def.addrsel->list6) {
&ptr->def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
kfree(netlbl_domhsh_addr6_entry(iter6));
}
@ -213,21 +213,21 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
if (addr4 != NULL) {
struct netlbl_domaddr4_map *map4;
map4 = netlbl_domhsh_addr4_entry(addr4);
type = map4->type;
cipsov4 = map4->type_def.cipsov4;
type = map4->def.type;
cipsov4 = map4->def.cipso;
netlbl_af4list_audit_addr(audit_buf, 0, NULL,
addr4->addr, addr4->mask);
#if IS_ENABLED(CONFIG_IPV6)
} else if (addr6 != NULL) {
struct netlbl_domaddr6_map *map6;
map6 = netlbl_domhsh_addr6_entry(addr6);
type = map6->type;
type = map6->def.type;
netlbl_af6list_audit_addr(audit_buf, 0, NULL,
&addr6->addr, &addr6->mask);
#endif /* IPv6 */
} else {
type = entry->type;
cipsov4 = entry->type_def.cipsov4;
type = entry->def.type;
cipsov4 = entry->def.cipso;
}
switch (type) {
case NETLBL_NLTYPE_UNLABELED:
@ -265,26 +265,25 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
if (entry == NULL)
return -EINVAL;
switch (entry->type) {
switch (entry->def.type) {
case NETLBL_NLTYPE_UNLABELED:
if (entry->type_def.cipsov4 != NULL ||
entry->type_def.addrsel != NULL)
if (entry->def.cipso != NULL || entry->def.addrsel != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CIPSOV4:
if (entry->type_def.cipsov4 == NULL)
if (entry->def.cipso == NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_ADDRSELECT:
netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
netlbl_af4list_foreach(iter4, &entry->def.addrsel->list4) {
map4 = netlbl_domhsh_addr4_entry(iter4);
switch (map4->type) {
switch (map4->def.type) {
case NETLBL_NLTYPE_UNLABELED:
if (map4->type_def.cipsov4 != NULL)
if (map4->def.cipso != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CIPSOV4:
if (map4->type_def.cipsov4 == NULL)
if (map4->def.cipso == NULL)
return -EINVAL;
break;
default:
@ -292,9 +291,9 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
}
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
netlbl_af6list_foreach(iter6, &entry->def.addrsel->list6) {
map6 = netlbl_domhsh_addr6_entry(iter6);
switch (map6->type) {
switch (map6->def.type) {
case NETLBL_NLTYPE_UNLABELED:
break;
default:
@ -402,32 +401,31 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
rcu_assign_pointer(netlbl_domhsh_def, entry);
}
if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
netlbl_af4list_foreach_rcu(iter4,
&entry->type_def.addrsel->list4)
&entry->def.addrsel->list4)
netlbl_domhsh_audit_add(entry, iter4, NULL,
ret_val, audit_info);
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
&entry->type_def.addrsel->list6)
&entry->def.addrsel->list6)
netlbl_domhsh_audit_add(entry, NULL, iter6,
ret_val, audit_info);
#endif /* IPv6 */
} else
netlbl_domhsh_audit_add(entry, NULL, NULL,
ret_val, audit_info);
} else if (entry_old->type == NETLBL_NLTYPE_ADDRSELECT &&
entry->type == NETLBL_NLTYPE_ADDRSELECT) {
} else if (entry_old->def.type == NETLBL_NLTYPE_ADDRSELECT &&
entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
struct list_head *old_list4;
struct list_head *old_list6;
old_list4 = &entry_old->type_def.addrsel->list4;
old_list6 = &entry_old->type_def.addrsel->list6;
old_list4 = &entry_old->def.addrsel->list4;
old_list6 = &entry_old->def.addrsel->list6;
/* we only allow the addition of address selectors if all of
* the selectors do not exist in the existing domain map */
netlbl_af4list_foreach_rcu(iter4,
&entry->type_def.addrsel->list4)
netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4)
if (netlbl_af4list_search_exact(iter4->addr,
iter4->mask,
old_list4)) {
@ -435,8 +433,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
goto add_return;
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
&entry->type_def.addrsel->list6)
netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6)
if (netlbl_af6list_search_exact(&iter6->addr,
&iter6->mask,
old_list6)) {
@ -446,7 +443,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
#endif /* IPv6 */
netlbl_af4list_foreach_safe(iter4, tmp4,
&entry->type_def.addrsel->list4) {
&entry->def.addrsel->list4) {
netlbl_af4list_remove_entry(iter4);
iter4->valid = 1;
ret_val = netlbl_af4list_add(iter4, old_list4);
@ -457,7 +454,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
&entry->type_def.addrsel->list6) {
&entry->def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
iter6->valid = 1;
ret_val = netlbl_af6list_add(iter6, old_list6);
@ -538,18 +535,18 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
struct netlbl_af4list *iter4;
struct netlbl_domaddr4_map *map4;
switch (entry->type) {
switch (entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
netlbl_af4list_foreach_rcu(iter4,
&entry->type_def.addrsel->list4) {
&entry->def.addrsel->list4) {
map4 = netlbl_domhsh_addr4_entry(iter4);
cipso_v4_doi_putdef(map4->type_def.cipsov4);
cipso_v4_doi_putdef(map4->def.cipso);
}
/* no need to check the IPv6 list since we currently
* support only unlabeled protocols for IPv6 */
break;
case NETLBL_NLTYPE_CIPSOV4:
cipso_v4_doi_putdef(entry->type_def.cipsov4);
cipso_v4_doi_putdef(entry->def.cipso);
break;
}
call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
@ -590,20 +587,21 @@ int netlbl_domhsh_remove_af4(const char *domain,
entry_map = netlbl_domhsh_search(domain);
else
entry_map = netlbl_domhsh_search_def(domain);
if (entry_map == NULL || entry_map->type != NETLBL_NLTYPE_ADDRSELECT)
if (entry_map == NULL ||
entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT)
goto remove_af4_failure;
spin_lock(&netlbl_domhsh_lock);
entry_addr = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
&entry_map->type_def.addrsel->list4);
&entry_map->def.addrsel->list4);
spin_unlock(&netlbl_domhsh_lock);
if (entry_addr == NULL)
goto remove_af4_failure;
netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
netlbl_af4list_foreach_rcu(iter4, &entry_map->def.addrsel->list4)
goto remove_af4_single_addr;
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
netlbl_af6list_foreach_rcu(iter6, &entry_map->def.addrsel->list6)
goto remove_af4_single_addr;
#endif /* IPv6 */
/* the domain mapping is empty so remove it from the mapping table */
@ -616,7 +614,7 @@ remove_af4_single_addr:
* shouldn't be a problem */
synchronize_rcu();
entry = netlbl_domhsh_addr4_entry(entry_addr);
cipso_v4_doi_putdef(entry->type_def.cipsov4);
cipso_v4_doi_putdef(entry->def.cipso);
kfree(entry);
return 0;
@ -693,8 +691,8 @@ struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain)
* responsible for ensuring that rcu_read_[un]lock() is called.
*
*/
struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
__be32 addr)
struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
__be32 addr)
{
struct netlbl_dom_map *dom_iter;
struct netlbl_af4list *addr_iter;
@ -702,15 +700,13 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
dom_iter = netlbl_domhsh_search_def(domain);
if (dom_iter == NULL)
return NULL;
if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
return NULL;
addr_iter = netlbl_af4list_search(addr,
&dom_iter->type_def.addrsel->list4);
if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
return &dom_iter->def;
addr_iter = netlbl_af4list_search(addr, &dom_iter->def.addrsel->list4);
if (addr_iter == NULL)
return NULL;
return netlbl_domhsh_addr4_entry(addr_iter);
return &(netlbl_domhsh_addr4_entry(addr_iter)->def);
}
#if IS_ENABLED(CONFIG_IPV6)
@ -725,7 +721,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
* responsible for ensuring that rcu_read_[un]lock() is called.
*
*/
struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
const struct in6_addr *addr)
{
struct netlbl_dom_map *dom_iter;
@ -734,15 +730,13 @@ struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
dom_iter = netlbl_domhsh_search_def(domain);
if (dom_iter == NULL)
return NULL;
if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
return NULL;
addr_iter = netlbl_af6list_search(addr,
&dom_iter->type_def.addrsel->list6);
if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
return &dom_iter->def;
addr_iter = netlbl_af6list_search(addr, &dom_iter->def.addrsel->list6);
if (addr_iter == NULL)
return NULL;
return netlbl_domhsh_addr6_entry(addr_iter);
return &(netlbl_domhsh_addr6_entry(addr_iter)->def);
}
#endif /* IPv6 */

View File

@ -43,37 +43,35 @@
#define NETLBL_DOMHSH_BITSIZE 7
/* Domain mapping definition structures */
struct netlbl_domaddr_map {
struct list_head list4;
struct list_head list6;
};
struct netlbl_dommap_def {
u32 type;
union {
struct netlbl_domaddr_map *addrsel;
struct cipso_v4_doi *cipso;
};
};
#define netlbl_domhsh_addr4_entry(iter) \
container_of(iter, struct netlbl_domaddr4_map, list)
struct netlbl_domaddr4_map {
u32 type;
union {
struct cipso_v4_doi *cipsov4;
} type_def;
struct netlbl_dommap_def def;
struct netlbl_af4list list;
};
#define netlbl_domhsh_addr6_entry(iter) \
container_of(iter, struct netlbl_domaddr6_map, list)
struct netlbl_domaddr6_map {
u32 type;
/* NOTE: no 'type_def' union needed at present since we don't currently
* support any IPv6 labeling protocols */
struct netlbl_dommap_def def;
struct netlbl_af6list list;
};
struct netlbl_domaddr_map {
struct list_head list4;
struct list_head list6;
};
struct netlbl_dom_map {
char *domain;
u32 type;
union {
struct cipso_v4_doi *cipsov4;
struct netlbl_domaddr_map *addrsel;
} type_def;
struct netlbl_dommap_def def;
u32 valid;
struct list_head list;
@ -97,16 +95,16 @@ int netlbl_domhsh_remove_af4(const char *domain,
int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info);
struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
__be32 addr);
struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
__be32 addr);
#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
const struct in6_addr *addr);
#endif /* IPv6 */
int netlbl_domhsh_walk(u32 *skip_bkt,
u32 *skip_chain,
int (*callback) (struct netlbl_dom_map *entry, void *arg),
void *cb_arg);
#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
const struct in6_addr *addr);
#endif /* IPv6 */
#endif

View File

@ -122,7 +122,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
}
if (addr == NULL && mask == NULL)
entry->type = NETLBL_NLTYPE_UNLABELED;
entry->def.type = NETLBL_NLTYPE_UNLABELED;
else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
@ -137,7 +137,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
map4->type = NETLBL_NLTYPE_UNLABELED;
map4->def.type = NETLBL_NLTYPE_UNLABELED;
map4->list.addr = addr4->s_addr & mask4->s_addr;
map4->list.mask = mask4->s_addr;
map4->list.valid = 1;
@ -154,7 +154,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
map6->type = NETLBL_NLTYPE_UNLABELED;
map6->def.type = NETLBL_NLTYPE_UNLABELED;
map6->list.addr = *addr6;
map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
@ -174,8 +174,8 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
break;
}
entry->type_def.addrsel = addrmap;
entry->type = NETLBL_NLTYPE_ADDRSELECT;
entry->def.addrsel = addrmap;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto cfg_unlbl_map_add_failure;
@ -355,8 +355,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
}
if (addr == NULL && mask == NULL) {
entry->type_def.cipsov4 = doi_def;
entry->type = NETLBL_NLTYPE_CIPSOV4;
entry->def.cipso = doi_def;
entry->def.type = NETLBL_NLTYPE_CIPSOV4;
} else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
@ -367,8 +367,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
if (addrinfo == NULL)
goto out_addrinfo;
addrinfo->type_def.cipsov4 = doi_def;
addrinfo->type = NETLBL_NLTYPE_CIPSOV4;
addrinfo->def.cipso = doi_def;
addrinfo->def.type = NETLBL_NLTYPE_CIPSOV4;
addrinfo->list.addr = addr->s_addr & mask->s_addr;
addrinfo->list.mask = mask->s_addr;
addrinfo->list.valid = 1;
@@ -376,8 +376,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
if (ret_val != 0)
goto cfg_cipsov4_map_add_failure;
entry->type_def.addrsel = addrmap;
entry->type = NETLBL_NLTYPE_ADDRSELECT;
entry->def.addrsel = addrmap;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto out_addrmap;
@@ -657,14 +657,14 @@ int netlbl_sock_setattr(struct sock *sk,
}
switch (family) {
case AF_INET:
switch (dom_entry->type) {
switch (dom_entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
ret_val = -EDESTADDRREQ;
break;
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
dom_entry->type_def.cipsov4,
secattr);
dom_entry->def.cipso,
secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
ret_val = 0;
@@ -754,23 +754,22 @@ int netlbl_conn_setattr(struct sock *sk,
{
int ret_val;
struct sockaddr_in *addr4;
struct netlbl_domaddr4_map *af4_entry;
struct netlbl_dommap_def *entry;
rcu_read_lock();
switch (addr->sa_family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
addr4->sin_addr.s_addr);
if (af4_entry == NULL) {
entry = netlbl_domhsh_getentry_af4(secattr->domain,
addr4->sin_addr.s_addr);
if (entry == NULL) {
ret_val = -ENOENT;
goto conn_setattr_return;
}
switch (af4_entry->type) {
switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
af4_entry->type_def.cipsov4,
secattr);
entry->cipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
@@ -812,36 +811,21 @@ int netlbl_req_setattr(struct request_sock *req,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val;
struct netlbl_dom_map *dom_entry;
struct netlbl_domaddr4_map *af4_entry;
u32 proto_type;
struct cipso_v4_doi *proto_cv4;
struct netlbl_dommap_def *entry;
rcu_read_lock();
dom_entry = netlbl_domhsh_getentry(secattr->domain);
if (dom_entry == NULL) {
ret_val = -ENOENT;
goto req_setattr_return;
}
switch (req->rsk_ops->family) {
case AF_INET:
if (dom_entry->type == NETLBL_NLTYPE_ADDRSELECT) {
struct inet_request_sock *req_inet = inet_rsk(req);
af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
req_inet->rmt_addr);
if (af4_entry == NULL) {
ret_val = -ENOENT;
goto req_setattr_return;
}
proto_type = af4_entry->type;
proto_cv4 = af4_entry->type_def.cipsov4;
} else {
proto_type = dom_entry->type;
proto_cv4 = dom_entry->type_def.cipsov4;
entry = netlbl_domhsh_getentry_af4(secattr->domain,
inet_rsk(req)->rmt_addr);
if (entry == NULL) {
ret_val = -ENOENT;
goto req_setattr_return;
}
switch (proto_type) {
switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_req_setattr(req, proto_cv4, secattr);
ret_val = cipso_v4_req_setattr(req,
entry->cipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
@@ -899,23 +883,21 @@ int netlbl_skbuff_setattr(struct sk_buff *skb,
{
int ret_val;
struct iphdr *hdr4;
struct netlbl_domaddr4_map *af4_entry;
struct netlbl_dommap_def *entry;
rcu_read_lock();
switch (family) {
case AF_INET:
hdr4 = ip_hdr(skb);
af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
hdr4->daddr);
if (af4_entry == NULL) {
entry = netlbl_domhsh_getentry_af4(secattr->domain,hdr4->daddr);
if (entry == NULL) {
ret_val = -ENOENT;
goto skbuff_setattr_return;
}
switch (af4_entry->type) {
switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_skbuff_setattr(skb,
af4_entry->type_def.cipsov4,
secattr);
ret_val = cipso_v4_skbuff_setattr(skb, entry->cipso,
secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now


@@ -104,7 +104,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = -ENOMEM;
goto add_failure;
}
entry->type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
entry->def.type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
if (info->attrs[NLBL_MGMT_A_DOMAIN]) {
size_t tmp_size = nla_len(info->attrs[NLBL_MGMT_A_DOMAIN]);
entry->domain = kmalloc(tmp_size, GFP_KERNEL);
@@ -116,12 +116,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size);
}
/* NOTE: internally we allow/use a entry->type value of
/* NOTE: internally we allow/use a entry->def.type value of
* NETLBL_NLTYPE_ADDRSELECT but we don't currently allow users
* to pass that as a protocol value because we need to know the
* "real" protocol */
switch (entry->type) {
switch (entry->def.type) {
case NETLBL_NLTYPE_UNLABELED:
break;
case NETLBL_NLTYPE_CIPSOV4:
@@ -132,7 +132,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
cipsov4 = cipso_v4_doi_getdef(tmp_val);
if (cipsov4 == NULL)
goto add_failure;
entry->type_def.cipsov4 = cipsov4;
entry->def.cipso = cipsov4;
break;
default:
goto add_failure;
@@ -172,9 +172,9 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map->list.addr = addr->s_addr & mask->s_addr;
map->list.mask = mask->s_addr;
map->list.valid = 1;
map->type = entry->type;
map->def.type = entry->def.type;
if (cipsov4)
map->type_def.cipsov4 = cipsov4;
map->def.cipso = cipsov4;
ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
if (ret_val != 0) {
@@ -182,8 +182,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
goto add_failure;
}
entry->type = NETLBL_NLTYPE_ADDRSELECT;
entry->type_def.addrsel = addrmap;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
entry->def.addrsel = addrmap;
#if IS_ENABLED(CONFIG_IPV6)
} else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
struct in6_addr *addr;
@@ -223,7 +223,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
map->list.mask = *mask;
map->list.valid = 1;
map->type = entry->type;
map->def.type = entry->def.type;
ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
if (ret_val != 0) {
@@ -231,8 +231,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
goto add_failure;
}
entry->type = NETLBL_NLTYPE_ADDRSELECT;
entry->type_def.addrsel = addrmap;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
entry->def.addrsel = addrmap;
#endif /* IPv6 */
}
@@ -281,14 +281,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
}
switch (entry->type) {
switch (entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
nla_a = nla_nest_start(skb, NLBL_MGMT_A_SELECTORLIST);
if (nla_a == NULL)
return -ENOMEM;
netlbl_af4list_foreach_rcu(iter4,
&entry->type_def.addrsel->list4) {
netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
struct netlbl_domaddr4_map *map4;
struct in_addr addr_struct;
@@ -310,13 +309,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
map4 = netlbl_domhsh_addr4_entry(iter4);
ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
map4->type);
map4->def.type);
if (ret_val != 0)
return ret_val;
switch (map4->type) {
switch (map4->def.type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
map4->type_def.cipsov4->doi);
map4->def.cipso->doi);
if (ret_val != 0)
return ret_val;
break;
@@ -325,8 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
nla_nest_end(skb, nla_b);
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
&entry->type_def.addrsel->list6) {
netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
struct netlbl_domaddr6_map *map6;
nla_b = nla_nest_start(skb, NLBL_MGMT_A_ADDRSELECTOR);
@@ -345,7 +343,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
map6 = netlbl_domhsh_addr6_entry(iter6);
ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
map6->type);
map6->def.type);
if (ret_val != 0)
return ret_val;
@@ -356,14 +354,14 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
nla_nest_end(skb, nla_a);
break;
case NETLBL_NLTYPE_UNLABELED:
ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type);
ret_val = nla_put_u32(skb,NLBL_MGMT_A_PROTOCOL,entry->def.type);
break;
case NETLBL_NLTYPE_CIPSOV4:
ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type);
ret_val = nla_put_u32(skb,NLBL_MGMT_A_PROTOCOL,entry->def.type);
if (ret_val != 0)
return ret_val;
ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
entry->type_def.cipsov4->doi);
entry->def.cipso->doi);
break;
}


@@ -1541,7 +1541,7 @@ int __init netlbl_unlabel_defconf(void)
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (entry == NULL)
return -ENOMEM;
entry->type = NETLBL_NLTYPE_UNLABELED;
entry->def.type = NETLBL_NLTYPE_UNLABELED;
ret_val = netlbl_domhsh_add_default(entry, &audit_info);
if (ret_val != 0)
return ret_val;


@@ -44,7 +44,7 @@ DEFINE_MUTEX(nfc_devlist_mutex);
/* NFC device ID bitmap */
static DEFINE_IDA(nfc_index_ida);
int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name)
int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name)
{
int rc = 0;
@@ -62,28 +62,28 @@ int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name)
goto error;
}
if (!dev->ops->fw_upload) {
if (!dev->ops->fw_download) {
rc = -EOPNOTSUPP;
goto error;
}
dev->fw_upload_in_progress = true;
rc = dev->ops->fw_upload(dev, firmware_name);
dev->fw_download_in_progress = true;
rc = dev->ops->fw_download(dev, firmware_name);
if (rc)
dev->fw_upload_in_progress = false;
dev->fw_download_in_progress = false;
error:
device_unlock(&dev->dev);
return rc;
}
int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
{
dev->fw_upload_in_progress = false;
dev->fw_download_in_progress = false;
return nfc_genl_fw_upload_done(dev, firmware_name);
return nfc_genl_fw_download_done(dev, firmware_name);
}
EXPORT_SYMBOL(nfc_fw_upload_done);
EXPORT_SYMBOL(nfc_fw_download_done);
/**
* nfc_dev_up - turn on the NFC device
@@ -110,7 +110,7 @@ int nfc_dev_up(struct nfc_dev *dev)
goto error;
}
if (dev->fw_upload_in_progress) {
if (dev->fw_download_in_progress) {
rc = -EBUSY;
goto error;
}


@@ -809,14 +809,14 @@ static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
}
}
static int hci_fw_upload(struct nfc_dev *nfc_dev, const char *firmware_name)
static int hci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
if (!hdev->ops->fw_upload)
if (!hdev->ops->fw_download)
return -ENOTSUPP;
return hdev->ops->fw_upload(hdev, firmware_name);
return hdev->ops->fw_download(hdev, firmware_name);
}
static struct nfc_ops hci_nfc_ops = {
@@ -831,7 +831,7 @@ static struct nfc_ops hci_nfc_ops = {
.im_transceive = hci_transceive,
.tm_send = hci_tm_send,
.check_presence = hci_check_presence,
.fw_upload = hci_fw_upload,
.fw_download = hci_fw_download,
.discover_se = hci_discover_se,
.enable_se = hci_enable_se,
.disable_se = hci_disable_se,


@@ -11,6 +11,7 @@ config NFC_NCI
config NFC_NCI_SPI
depends on NFC_NCI && SPI
select CRC_CCITT
bool "NCI over SPI protocol support"
default n
help


@@ -1089,7 +1089,7 @@ exit:
return rc;
}
static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info)
static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc;
@@ -1108,13 +1108,13 @@ static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info)
nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME],
sizeof(firmware_name));
rc = nfc_fw_upload(dev, firmware_name);
rc = nfc_fw_download(dev, firmware_name);
nfc_put_device(dev);
return rc;
}
int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
{
struct sk_buff *msg;
void *hdr;
@@ -1124,7 +1124,7 @@ int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
NFC_CMD_FW_UPLOAD);
NFC_CMD_FW_DOWNLOAD);
if (!hdr)
goto free_msg;
@@ -1251,8 +1251,8 @@ static struct genl_ops nfc_genl_ops[] = {
.policy = nfc_genl_policy,
},
{
.cmd = NFC_CMD_FW_UPLOAD,
.doit = nfc_genl_fw_upload,
.cmd = NFC_CMD_FW_DOWNLOAD,
.doit = nfc_genl_fw_download,
.policy = nfc_genl_policy,
},
{


@@ -123,10 +123,10 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
class_dev_iter_exit(iter);
}
int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name);
int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name);
int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name);
int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name);
int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
int nfc_dev_up(struct nfc_dev *dev);


@@ -605,6 +605,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
struct sockaddr_atmpvc pvc;
int state;
memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
pvc.sap_addr.vpi = flow->vcc->vpi;


@@ -100,7 +100,7 @@ struct htb_class {
struct psched_ratecfg ceil;
s64 buffer, cbuffer;/* token bucket depth/rate */
s64 mbuffer; /* max wait time */
int prio; /* these two are used only by leaves... */
u32 prio; /* these two are used only by leaves... */
int quantum; /* but stored for parent-to-leaf return */
struct tcf_proto *filter_list; /* class attached filters */


@@ -106,7 +106,7 @@
#include <linux/atalk.h>
#include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif


@@ -355,8 +355,12 @@ static int tipc_open_listening_sock(struct tipc_server *s)
return PTR_ERR(con);
sock = tipc_create_listen_sock(con);
if (!sock)
if (!sock) {
idr_remove(&s->conn_idr, con->conid);
s->idr_in_use--;
kfree(con);
return -EINVAL;
}
tipc_register_callbacks(sock, con);
return 0;
@@ -563,9 +567,14 @@ int tipc_server_start(struct tipc_server *s)
kmem_cache_destroy(s->rcvbuf_cache);
return ret;
}
ret = tipc_open_listening_sock(s);
if (ret < 0) {
tipc_work_stop(s);
kmem_cache_destroy(s->rcvbuf_cache);
return ret;
}
s->enabled = 1;
return tipc_open_listening_sock(s);
return ret;
}
void tipc_server_stop(struct tipc_server *s)


@@ -2247,10 +2247,13 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
void wiphy_regulatory_register(struct wiphy *wiphy)
{
struct regulatory_request *lr;
if (!reg_dev_ignore_cell_hint(wiphy))
reg_num_devs_support_basehint++;
wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
lr = get_last_request();
wiphy_update_regulatory(wiphy, lr->initiator);
}
void wiphy_regulatory_deregister(struct wiphy *wiphy)